remove unnecessary neutron files under neutron/agent

Change-Id: Id9657dd4bbb8da07988a22fd8531b49352cf2253
Isaku Yamahata 2014-06-26 16:06:56 +09:00
parent 34b7393388
commit d0fd540c3c
15 changed files with 0 additions and 5620 deletions


@@ -1,622 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import eventlet
eventlet.monkey_patch()
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ovs_lib # noqa
from neutron.agent import rpc as agent_rpc
from neutron.common import config as common_config
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import service
from neutron import service as neutron_service
LOG = logging.getLogger(__name__)
class DhcpAgent(manager.Manager):
OPTS = [
cfg.IntOpt('resync_interval', default=5,
help=_("Interval to resync.")),
cfg.StrOpt('dhcp_driver',
default='neutron.agent.linux.dhcp.Dnsmasq',
help=_("The driver used to manage the DHCP server.")),
cfg.BoolOpt('enable_isolated_metadata', default=False,
help=_("Support Metadata requests on isolated networks.")),
cfg.BoolOpt('enable_metadata_network', default=False,
help=_("Allows for serving metadata requests from a "
"dedicated network. Requires "
"enable_isolated_metadata = True")),
cfg.IntOpt('num_sync_threads', default=4,
help=_('Number of threads to use during sync process.')),
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location of Metadata Proxy UNIX domain '
'socket')),
]
def __init__(self, host=None):
super(DhcpAgent, self).__init__(host=host)
self.needs_resync_reasons = []
self.conf = cfg.CONF
self.cache = NetworkCache()
self.root_helper = config.get_root_helper(self.conf)
self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
ctx = context.get_admin_context_without_session()
self.plugin_rpc = DhcpPluginApi(topics.PLUGIN,
ctx, self.conf.use_namespaces)
# create dhcp dir to store dhcp info
dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
if not os.path.isdir(dhcp_dir):
os.makedirs(dhcp_dir, 0o755)
self.dhcp_version = self.dhcp_driver_cls.check_version()
self._populate_networks_cache()
def _populate_networks_cache(self):
"""Populate the networks cache when the DHCP-agent starts."""
try:
existing_networks = self.dhcp_driver_cls.existing_dhcp_networks(
self.conf,
self.root_helper
)
for net_id in existing_networks:
net = dhcp.NetModel(self.conf.use_namespaces,
{"id": net_id,
"subnets": [],
"ports": []})
self.cache.put(net)
except NotImplementedError:
# just go ahead with an empty networks cache
LOG.debug(
_("The '%s' DHCP-driver does not support retrieving of a "
"list of existing networks"),
self.conf.dhcp_driver
)
def after_start(self):
self.run()
LOG.info(_("DHCP agent started"))
def run(self):
"""Activate the DHCP agent."""
self.sync_state()
self.periodic_resync()
def call_driver(self, action, network, **action_kwargs):
"""Invoke an action on a DHCP driver instance."""
LOG.debug(_('Calling driver for network: %(net)s action: %(action)s'),
{'net': network.id, 'action': action})
try:
            # the driver expects something that is duck-typed similarly
            # to the base models
driver = self.dhcp_driver_cls(self.conf,
network,
self.root_helper,
self.dhcp_version,
self.plugin_rpc)
getattr(driver, action)(**action_kwargs)
return True
except exceptions.Conflict:
# No need to resync here, the agent will receive the event related
# to a status update for the network
LOG.warning(_('Unable to %(action)s dhcp for %(net_id)s: there is '
'a conflict with its current state; please check '
'that the network and/or its subnet(s) still exist.')
% {'net_id': network.id, 'action': action})
except Exception as e:
self.schedule_resync(e)
if (isinstance(e, rpc_compat.RemoteError)
and e.exc_type == 'NetworkNotFound'
or isinstance(e, exceptions.NetworkNotFound)):
LOG.warning(_("Network %s has been deleted."), network.id)
else:
LOG.exception(_('Unable to %(action)s dhcp for %(net_id)s.')
% {'net_id': network.id, 'action': action})
def schedule_resync(self, reason):
"""Schedule a resync for a given reason."""
self.needs_resync_reasons.append(reason)
@utils.synchronized('dhcp-agent')
def sync_state(self):
"""Sync the local DHCP state with Neutron."""
LOG.info(_('Synchronizing state'))
pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
known_network_ids = set(self.cache.get_network_ids())
try:
active_networks = self.plugin_rpc.get_active_networks_info()
active_network_ids = set(network.id for network in active_networks)
for deleted_id in known_network_ids - active_network_ids:
try:
self.disable_dhcp_helper(deleted_id)
except Exception as e:
self.schedule_resync(e)
LOG.exception(_('Unable to sync network state on deleted '
'network %s'), deleted_id)
for network in active_networks:
pool.spawn(self.safe_configure_dhcp_for_network, network)
pool.waitall()
LOG.info(_('Synchronizing state complete'))
except Exception as e:
self.schedule_resync(e)
LOG.exception(_('Unable to sync network state.'))
def _periodic_resync_helper(self):
"""Resync the dhcp state at the configured interval."""
while True:
eventlet.sleep(self.conf.resync_interval)
if self.needs_resync_reasons:
                # be careful to avoid a race with additions to the list
                # from other threads
reasons = self.needs_resync_reasons
self.needs_resync_reasons = []
for r in reasons:
LOG.debug(_("resync: %(reason)s"),
{"reason": r})
self.sync_state()
def periodic_resync(self):
"""Spawn a thread to periodically resync the dhcp state."""
eventlet.spawn(self._periodic_resync_helper)
def safe_get_network_info(self, network_id):
try:
network = self.plugin_rpc.get_network_info(network_id)
if not network:
LOG.warn(_('Network %s has been deleted.'), network_id)
return network
except Exception as e:
self.schedule_resync(e)
LOG.exception(_('Network %s info call failed.'), network_id)
def enable_dhcp_helper(self, network_id):
"""Enable DHCP for a network that meets enabling criteria."""
network = self.safe_get_network_info(network_id)
if network:
self.configure_dhcp_for_network(network)
def safe_configure_dhcp_for_network(self, network):
try:
self.configure_dhcp_for_network(network)
except (exceptions.NetworkNotFound, RuntimeError):
LOG.warn(_('Network %s may have been deleted and its resources '
'may have already been disposed.'), network.id)
def configure_dhcp_for_network(self, network):
if not network.admin_state_up:
return
for subnet in network.subnets:
if subnet.enable_dhcp:
if self.call_driver('enable', network):
if (self.conf.use_namespaces and
self.conf.enable_isolated_metadata):
self.enable_isolated_metadata_proxy(network)
self.cache.put(network)
break
def disable_dhcp_helper(self, network_id):
"""Disable DHCP for a network known to the agent."""
network = self.cache.get_network_by_id(network_id)
if network:
if (self.conf.use_namespaces and
self.conf.enable_isolated_metadata):
self.disable_isolated_metadata_proxy(network)
if self.call_driver('disable', network):
self.cache.remove(network)
def refresh_dhcp_helper(self, network_id):
"""Refresh or disable DHCP for a network depending on the current state
of the network.
"""
old_network = self.cache.get_network_by_id(network_id)
if not old_network:
            # DHCP is not currently running for this network.
return self.enable_dhcp_helper(network_id)
network = self.safe_get_network_info(network_id)
if not network:
return
old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)
new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)
if new_cidrs and old_cidrs == new_cidrs:
self.call_driver('reload_allocations', network)
self.cache.put(network)
elif new_cidrs:
if self.call_driver('restart', network):
self.cache.put(network)
else:
self.disable_dhcp_helper(network.id)
@utils.synchronized('dhcp-agent')
def network_create_end(self, context, payload):
"""Handle the network.create.end notification event."""
network_id = payload['network']['id']
self.enable_dhcp_helper(network_id)
@utils.synchronized('dhcp-agent')
def network_update_end(self, context, payload):
"""Handle the network.update.end notification event."""
network_id = payload['network']['id']
if payload['network']['admin_state_up']:
self.enable_dhcp_helper(network_id)
else:
self.disable_dhcp_helper(network_id)
@utils.synchronized('dhcp-agent')
def network_delete_end(self, context, payload):
"""Handle the network.delete.end notification event."""
self.disable_dhcp_helper(payload['network_id'])
@utils.synchronized('dhcp-agent')
def subnet_update_end(self, context, payload):
"""Handle the subnet.update.end notification event."""
network_id = payload['subnet']['network_id']
self.refresh_dhcp_helper(network_id)
# Use the update handler for the subnet create event.
subnet_create_end = subnet_update_end
@utils.synchronized('dhcp-agent')
def subnet_delete_end(self, context, payload):
"""Handle the subnet.delete.end notification event."""
subnet_id = payload['subnet_id']
network = self.cache.get_network_by_subnet_id(subnet_id)
if network:
self.refresh_dhcp_helper(network.id)
@utils.synchronized('dhcp-agent')
def port_update_end(self, context, payload):
"""Handle the port.update.end notification event."""
updated_port = dhcp.DictModel(payload['port'])
network = self.cache.get_network_by_id(updated_port.network_id)
if network:
self.cache.put_port(updated_port)
self.call_driver('reload_allocations', network)
# Use the update handler for the port create event.
port_create_end = port_update_end
@utils.synchronized('dhcp-agent')
def port_delete_end(self, context, payload):
"""Handle the port.delete.end notification event."""
port = self.cache.get_port_by_id(payload['port_id'])
if port:
network = self.cache.get_network_by_id(port.network_id)
self.cache.remove_port(port)
self.call_driver('reload_allocations', network)
def enable_isolated_metadata_proxy(self, network):
# The proxy might work for either a single network
# or all the networks connected via a router
# to the one passed as a parameter
neutron_lookup_param = '--network_id=%s' % network.id
meta_cidr = netaddr.IPNetwork(dhcp.METADATA_DEFAULT_CIDR)
has_metadata_subnet = any(netaddr.IPNetwork(s.cidr) in meta_cidr
for s in network.subnets)
if (self.conf.enable_metadata_network and has_metadata_subnet):
router_ports = [port for port in network.ports
if (port.device_owner ==
constants.DEVICE_OWNER_ROUTER_INTF)]
if router_ports:
# Multiple router ports should not be allowed
if len(router_ports) > 1:
LOG.warning(_("%(port_num)d router ports found on the "
"metadata access network. Only the port "
"%(port_id)s, for router %(router_id)s "
"will be considered"),
{'port_num': len(router_ports),
'port_id': router_ports[0].id,
'router_id': router_ports[0].device_id})
neutron_lookup_param = ('--router_id=%s' %
router_ports[0].device_id)
def callback(pid_file):
metadata_proxy_socket = cfg.CONF.metadata_proxy_socket
proxy_cmd = ['neutron-ns-metadata-proxy',
'--pid_file=%s' % pid_file,
'--metadata_proxy_socket=%s' % metadata_proxy_socket,
neutron_lookup_param,
'--state_path=%s' % self.conf.state_path,
'--metadata_port=%d' % dhcp.METADATA_PORT]
proxy_cmd.extend(config.get_log_args(
cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % network.id))
return proxy_cmd
pm = external_process.ProcessManager(
self.conf,
network.id,
self.root_helper,
network.namespace)
pm.enable(callback)
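    # The command spawned above looks like the following (hypothetical
    # paths and UUID, shown only for illustration):
    #
    #     neutron-ns-metadata-proxy
    #         --pid_file=/var/lib/neutron/external/pids/<network-uuid>.pid
    #         --metadata_proxy_socket=/var/lib/neutron/metadata_proxy
    #         --network_id=<network-uuid>
    #         --state_path=/var/lib/neutron
    #         --metadata_port=80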
def disable_isolated_metadata_proxy(self, network):
pm = external_process.ProcessManager(
self.conf,
network.id,
self.root_helper,
network.namespace)
pm.disable()
class DhcpPluginApi(rpc_compat.RpcProxy):
"""Agent side of the dhcp rpc API.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
and update_dhcp_port methods.
"""
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic, context, use_namespaces):
super(DhcpPluginApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.context = context
self.host = cfg.CONF.host
self.use_namespaces = use_namespaces
def get_active_networks_info(self):
"""Make a remote process call to retrieve all network info."""
networks = self.call(self.context,
self.make_msg('get_active_networks_info',
host=self.host),
topic=self.topic)
return [dhcp.NetModel(self.use_namespaces, n) for n in networks]
def get_network_info(self, network_id):
"""Make a remote process call to retrieve network info."""
network = self.call(self.context,
self.make_msg('get_network_info',
network_id=network_id,
host=self.host),
topic=self.topic)
if network:
return dhcp.NetModel(self.use_namespaces, network)
def get_dhcp_port(self, network_id, device_id):
"""Make a remote process call to get the dhcp port."""
port = self.call(self.context,
self.make_msg('get_dhcp_port',
network_id=network_id,
device_id=device_id,
host=self.host),
topic=self.topic)
if port:
return dhcp.DictModel(port)
def create_dhcp_port(self, port):
"""Make a remote process call to create the dhcp port."""
port = self.call(self.context,
self.make_msg('create_dhcp_port',
port=port,
host=self.host),
topic=self.topic)
if port:
return dhcp.DictModel(port)
def update_dhcp_port(self, port_id, port):
"""Make a remote process call to update the dhcp port."""
port = self.call(self.context,
self.make_msg('update_dhcp_port',
port_id=port_id,
port=port,
host=self.host),
topic=self.topic)
if port:
return dhcp.DictModel(port)
def release_dhcp_port(self, network_id, device_id):
"""Make a remote process call to release the dhcp port."""
return self.call(self.context,
self.make_msg('release_dhcp_port',
network_id=network_id,
device_id=device_id,
host=self.host),
topic=self.topic)
def release_port_fixed_ip(self, network_id, device_id, subnet_id):
"""Make a remote process call to release a fixed_ip on the port."""
return self.call(self.context,
self.make_msg('release_port_fixed_ip',
network_id=network_id,
subnet_id=subnet_id,
device_id=device_id,
host=self.host),
topic=self.topic)
class NetworkCache(object):
"""Agent cache of the current network state."""
def __init__(self):
self.cache = {}
self.subnet_lookup = {}
self.port_lookup = {}
def get_network_ids(self):
return self.cache.keys()
def get_network_by_id(self, network_id):
return self.cache.get(network_id)
def get_network_by_subnet_id(self, subnet_id):
return self.cache.get(self.subnet_lookup.get(subnet_id))
def get_network_by_port_id(self, port_id):
return self.cache.get(self.port_lookup.get(port_id))
def put(self, network):
if network.id in self.cache:
self.remove(self.cache[network.id])
self.cache[network.id] = network
for subnet in network.subnets:
self.subnet_lookup[subnet.id] = network.id
for port in network.ports:
self.port_lookup[port.id] = network.id
def remove(self, network):
del self.cache[network.id]
for subnet in network.subnets:
del self.subnet_lookup[subnet.id]
for port in network.ports:
del self.port_lookup[port.id]
def put_port(self, port):
network = self.get_network_by_id(port.network_id)
for index in range(len(network.ports)):
if network.ports[index].id == port.id:
network.ports[index] = port
break
else:
network.ports.append(port)
self.port_lookup[port.id] = network.id
def remove_port(self, port):
network = self.get_network_by_port_id(port.id)
for index in range(len(network.ports)):
if network.ports[index] == port:
del network.ports[index]
del self.port_lookup[port.id]
break
def get_port_by_id(self, port_id):
network = self.get_network_by_port_id(port_id)
if network:
for port in network.ports:
if port.id == port_id:
return port
def get_state(self):
net_ids = self.get_network_ids()
num_nets = len(net_ids)
num_subnets = 0
num_ports = 0
for net_id in net_ids:
network = self.get_network_by_id(net_id)
num_subnets += len(network.subnets)
num_ports += len(network.ports)
return {'networks': num_nets,
'subnets': num_subnets,
'ports': num_ports}
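# Illustrative NetworkCache usage (not part of the original module;
# values are hypothetical):
#
#     cache = NetworkCache()
#     net = dhcp.NetModel(True, {"id": "net-1",
#                                "subnets": [{"id": "sub-1"}],
#                                "ports": [{"id": "port-1"}]})
#     cache.put(net)   # also indexes the network's subnets and ports
#     assert cache.get_network_by_subnet_id("sub-1") is net
#     assert cache.get_state() == {'networks': 1, 'subnets': 1, 'ports': 1}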
class DhcpAgentWithStateReport(DhcpAgent):
def __init__(self, host=None):
super(DhcpAgentWithStateReport, self).__init__(host=host)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-dhcp-agent',
'host': host,
'topic': topics.DHCP_AGENT,
'configurations': {
'dhcp_driver': cfg.CONF.dhcp_driver,
'use_namespaces': cfg.CONF.use_namespaces,
'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_DHCP}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
try:
self.agent_state.get('configurations').update(
self.cache.get_state())
ctx = context.get_admin_context_without_session()
self.state_rpc.report_state(ctx, self.agent_state, self.use_call)
self.use_call = False
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
self.run()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
return
if self.agent_state.pop('start_flag', None):
self.run()
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.schedule_resync(_("Agent updated: %(payload)s") %
{"payload": payload})
LOG.info(_("agent_updated by server side %s!"), payload)
def after_start(self):
LOG.info(_("DHCP agent started"))
def register_options():
cfg.CONF.register_opts(DhcpAgent.OPTS)
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp.OPTS)
cfg.CONF.register_opts(interface.OPTS)
def main():
register_options()
common_config.init(sys.argv[1:])
config.setup_logging(cfg.CONF)
server = neutron_service.Service.create(
binary='neutron-dhcp-agent',
topic=topics.DHCP_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager='neutron.agent.dhcp_agent.DhcpAgentWithStateReport')
service.launch(server).wait()


@@ -1,138 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import contextlib
import six
@six.add_metaclass(abc.ABCMeta)
class FirewallDriver(object):
"""Firewall Driver base class.
Defines methods that any driver providing security groups
and provider firewall functionality should implement.
    Note: the port attribute should carry the security group ids and
    security group rules.

    The port dict should contain:
      device: interface name
      fixed_ips: IPs of the device
      mac_address: MAC address of the device
      security_groups: [sgid, sgid]
      security_group_rules: [rule, rule]

    Each rule must contain ethertype and direction, and may contain
    security_group_id, protocol, port_min, port_max, source_ip_prefix,
    source_port_min, source_port_max, dest_ip_prefix, and remote_group_id.

    Note: source_group_ip in the REST API is converted by this rule:
      if direction is ingress:
        remote_group_ip will be a source_ip_prefix
      if direction is egress:
        remote_group_ip will be a dest_ip_prefix

    Note: remote_group_id in the REST API is converted by this rule:
      if direction is ingress:
        remote_group_id will be a list of source_ip_prefix
      if direction is egress:
        remote_group_id will be a list of dest_ip_prefix
    remote_group_id also requires managing group membership updates.
"""
def prepare_port_filter(self, port):
"""Prepare filters for the port.
This method should be called before the port is created.
"""
raise NotImplementedError()
def apply_port_filter(self, port):
"""Apply port filter.
Once this method returns, the port should be firewalled
appropriately. This method should as far as possible be a
no-op. It's vastly preferred to get everything set up in
prepare_port_filter.
"""
raise NotImplementedError()
def update_port_filter(self, port):
"""Refresh security group rules from data store
Gets called when an port gets added to or removed from
the security group the port is a member of or if the
group gains or looses a rule.
"""
raise NotImplementedError()
def remove_port_filter(self, port):
"""Stop filtering port."""
raise NotImplementedError()
def filter_defer_apply_on(self):
"""Defer application of filtering rule."""
pass
def filter_defer_apply_off(self):
"""Turn off deferral of rules and apply the rules now."""
pass
@property
def ports(self):
"""Returns filtered ports."""
pass
@contextlib.contextmanager
def defer_apply(self):
"""Defer apply context."""
self.filter_defer_apply_on()
try:
yield
finally:
self.filter_defer_apply_off()
class NoopFirewallDriver(FirewallDriver):
"""Noop Firewall Driver.
Firewall driver which does nothing.
This driver is for disabling the firewall functionality.
"""
def prepare_port_filter(self, port):
pass
def apply_port_filter(self, port):
pass
def update_port_filter(self, port):
pass
def remove_port_filter(self, port):
pass
def filter_defer_apply_on(self):
pass
def filter_defer_apply_off(self):
pass
@property
def ports(self):
return {}


@@ -1,56 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sylvain Afchain, eNovance SAS
# @author: Francois Eleouet, Orange
# @author: Mathieu Rohon, Orange
import abc
from oslo.config import cfg
import six
from neutron.common import log
@six.add_metaclass(abc.ABCMeta)
class L2populationRpcCallBackMixin(object):
@log.log
def add_fdb_entries(self, context, fdb_entries, host=None):
if not host or host == cfg.CONF.host:
self.fdb_add(context, fdb_entries)
@log.log
def remove_fdb_entries(self, context, fdb_entries, host=None):
if not host or host == cfg.CONF.host:
self.fdb_remove(context, fdb_entries)
@log.log
def update_fdb_entries(self, context, fdb_entries, host=None):
if not host or host == cfg.CONF.host:
self.fdb_update(context, fdb_entries)
@abc.abstractmethod
def fdb_add(self, context, fdb_entries):
pass
@abc.abstractmethod
def fdb_remove(self, context, fdb_entries):
pass
@abc.abstractmethod
def fdb_update(self, context, fdb_entries):
pass
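# An assumed shape of the fdb_entries payload dispatched above, shown
# for illustration only (the exact schema is defined by the
# l2population mechanism driver):
#
#     {'<network-uuid>': {
#         'network_type': 'vxlan',
#         'segment_id': 1001,
#         'ports': {'192.0.2.10': [['fa:16:3e:00:00:01', '10.0.0.3']]}}}
#
# The add/remove/update handlers only forward to the abstract fdb_*
# hooks when the message is unaddressed or addressed to this host.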


@@ -1,990 +0,0 @@
# Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import eventlet
eventlet.monkey_patch()
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import ovs_lib # noqa
from neutron.agent import rpc as agent_rpc
from neutron.common import config as common_config
from neutron.common import constants as l3_constants
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.common import utils as common_utils
from neutron import context
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.openstack.common import processutils
from neutron.openstack.common import service
from neutron import service as neutron_service
from neutron.services.firewall.agents.l3reference import firewall_l3_agent
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'
INTERNAL_DEV_PREFIX = 'qr-'
EXTERNAL_DEV_PREFIX = 'qg-'
RPC_LOOP_INTERVAL = 1
FLOATING_IP_CIDR_SUFFIX = '/32'
class L3PluginApi(rpc_compat.RpcProxy):
"""Agent side of the l3 agent RPC API.
API version history:
1.0 - Initial version.
1.1 - Floating IP operational status updates
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic, host):
super(L3PluginApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.host = host
def get_routers(self, context, router_ids=None):
"""Make a remote process call to retrieve the sync data for routers."""
return self.call(context,
self.make_msg('sync_routers', host=self.host,
router_ids=router_ids),
topic=self.topic)
def get_external_network_id(self, context):
"""Make a remote process call to retrieve the external network id.
@raise rpc_compat.RemoteError: with TooManyExternalNetworks
                                       as exc_type if there is
more than one external network
"""
return self.call(context,
self.make_msg('get_external_network_id',
host=self.host),
topic=self.topic)
def update_floatingip_statuses(self, context, router_id, fip_statuses):
"""Call the plugin update floating IPs's operational status."""
return self.call(context,
self.make_msg('update_floatingip_statuses',
router_id=router_id,
fip_statuses=fip_statuses),
topic=self.topic,
version='1.1')
class RouterInfo(object):
def __init__(self, router_id, root_helper, use_namespaces, router):
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
self._snat_action = None
self.internal_ports = []
self.floating_ips = set()
self.root_helper = root_helper
self.use_namespaces = use_namespaces
# Invoke the setter for establishing initial SNAT action
self.router = router
self.ns_name = NS_PREFIX + router_id if use_namespaces else None
self.iptables_manager = iptables_manager.IptablesManager(
root_helper=root_helper,
#FIXME(danwent): use_ipv6=True,
namespace=self.ns_name)
self.routes = []
@property
def router(self):
return self._router
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
# enable_snat by default if it wasn't specified by plugin
self._snat_enabled = self._router.get('enable_snat', True)
# Set a SNAT action for the router
if self._router.get('gw_port'):
self._snat_action = ('add_rules' if self._snat_enabled
else 'remove_rules')
elif self.ex_gw_port:
# Gateway port was removed, remove rules
self._snat_action = 'remove_rules'
def perform_snat_action(self, snat_callback, *args):
# Process SNAT rules for attached subnets
if self._snat_action:
snat_callback(self, self._router.get('gw_port'),
*args, action=self._snat_action)
self._snat_action = None
class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, manager.Manager):
"""Manager for L3NatAgent
API version history:
1.0 initial Version
1.1 changed the type of the routers parameter
to the routers_updated method.
It was previously a list of routers in dict format.
It is now a list of router IDs only.
Per rpc versioning rules, it is backwards compatible.
"""
RPC_API_VERSION = '1.1'
OPTS = [
cfg.StrOpt('external_network_bridge', default='br-ex',
help=_("Name of bridge used for external network "
"traffic.")),
cfg.IntOpt('metadata_port',
default=9697,
help=_("TCP Port used by Neutron metadata namespace "
"proxy.")),
cfg.IntOpt('send_arp_for_ha',
default=3,
help=_("Send this many gratuitous ARPs for HA setup, if "
"less than or equal to 0, the feature is disabled")),
cfg.StrOpt('router_id', default='',
help=_("If namespaces is disabled, the l3 agent can only"
" configure a router that has the matching router "
"ID.")),
cfg.BoolOpt('handle_internal_only_routers',
default=True,
help=_("Agent should implement routers with no gateway")),
cfg.StrOpt('gateway_external_network_id', default='',
help=_("UUID of external network for routers implemented "
"by the agents.")),
cfg.BoolOpt('enable_metadata_proxy', default=True,
help=_("Allow running metadata proxy.")),
cfg.BoolOpt('router_delete_namespaces', default=False,
help=_("Delete namespace after removing a router.")),
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location of Metadata Proxy UNIX domain '
'socket')),
]
def __init__(self, host, conf=None):
if conf:
self.conf = conf
else:
self.conf = cfg.CONF
self.root_helper = config.get_root_helper(self.conf)
self.router_info = {}
self._check_config_params()
try:
self.driver = importutils.import_object(
self.conf.interface_driver,
self.conf
)
except Exception:
msg = _("Error importing interface driver "
"'%s'") % self.conf.interface_driver
LOG.error(msg)
raise SystemExit(1)
self.context = context.get_admin_context_without_session()
self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
self.fullsync = True
self.updated_routers = set()
self.removed_routers = set()
self.sync_progress = False
self._clean_stale_namespaces = self.conf.use_namespaces
self.rpc_loop = loopingcall.FixedIntervalLoopingCall(
self._rpc_loop)
self.rpc_loop.start(interval=RPC_LOOP_INTERVAL)
super(L3NATAgent, self).__init__(conf=self.conf)
self.target_ex_net_id = None
def _check_config_params(self):
"""Check items in configuration files.
Check for required and invalid configuration items.
The actual values are not verified for correctness.
"""
if not self.conf.interface_driver:
msg = _('An interface driver must be specified')
LOG.error(msg)
raise SystemExit(1)
if not self.conf.use_namespaces and not self.conf.router_id:
msg = _('Router id is required if not using namespaces.')
LOG.error(msg)
raise SystemExit(1)
def _cleanup_namespaces(self, routers):
"""Destroy stale router namespaces on host when L3 agent restarts
This routine is called when self._clean_stale_namespaces is True.
The argument routers is the list of routers that are recorded in
the database as being hosted on this node.
"""
try:
root_ip = ip_lib.IPWrapper(self.root_helper)
host_namespaces = root_ip.get_namespaces(self.root_helper)
router_namespaces = set(ns for ns in host_namespaces
if ns.startswith(NS_PREFIX))
ns_to_ignore = set(NS_PREFIX + r['id'] for r in routers)
ns_to_destroy = router_namespaces - ns_to_ignore
except RuntimeError:
LOG.exception(_('RuntimeError in obtaining router list '
'for namespace cleanup.'))
else:
self._destroy_stale_router_namespaces(ns_to_destroy)
def _destroy_stale_router_namespaces(self, router_namespaces):
"""Destroys the stale router namespaces
The argumenet router_namespaces is a list of stale router namespaces
As some stale router namespaces may not be able to be deleted, only
one attempt will be made to delete them.
"""
for ns in router_namespaces:
if self.conf.enable_metadata_proxy:
self._destroy_metadata_proxy(ns[len(NS_PREFIX):], ns)
try:
self._destroy_router_namespace(ns)
except RuntimeError:
LOG.exception(_('Failed to destroy stale router namespace '
'%s'), ns)
self._clean_stale_namespaces = False
def _destroy_router_namespace(self, namespace):
ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=namespace)
for d in ns_ip.get_devices(exclude_loopback=True):
if d.name.startswith(INTERNAL_DEV_PREFIX):
# device is on default bridge
self.driver.unplug(d.name, namespace=namespace,
prefix=INTERNAL_DEV_PREFIX)
elif d.name.startswith(EXTERNAL_DEV_PREFIX):
self.driver.unplug(d.name,
bridge=self.conf.external_network_bridge,
namespace=namespace,
prefix=EXTERNAL_DEV_PREFIX)
if self.conf.router_delete_namespaces:
try:
ns_ip.netns.delete(namespace)
except RuntimeError:
msg = _('Failed trying to delete namespace: %s')
LOG.exception(msg % namespace)
def _create_router_namespace(self, ri):
ip_wrapper_root = ip_lib.IPWrapper(self.root_helper)
ip_wrapper = ip_wrapper_root.ensure_namespace(ri.ns_name)
ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1'])
def _fetch_external_net_id(self, force=False):
"""Find UUID of single external network for this agent."""
if self.conf.gateway_external_network_id:
return self.conf.gateway_external_network_id
# L3 agent doesn't use external_network_bridge to handle external
# networks, so bridge_mappings with provider networks will be used
# and the L3 agent is able to handle any external networks.
if not self.conf.external_network_bridge:
return
if not force and self.target_ex_net_id:
return self.target_ex_net_id
try:
self.target_ex_net_id = self.plugin_rpc.get_external_network_id(
self.context)
return self.target_ex_net_id
except rpc_compat.RemoteError as e:
with excutils.save_and_reraise_exception() as ctx:
if e.exc_type == 'TooManyExternalNetworks':
ctx.reraise = False
msg = _(
"The 'gateway_external_network_id' option must be "
"configured for this agent as Neutron has more than "
"one external network.")
raise Exception(msg)
def _router_added(self, router_id, router):
ri = RouterInfo(router_id, self.root_helper,
self.conf.use_namespaces, router)
self.router_info[router_id] = ri
if self.conf.use_namespaces:
self._create_router_namespace(ri)
for c, r in self.metadata_filter_rules():
ri.iptables_manager.ipv4['filter'].add_rule(c, r)
for c, r in self.metadata_nat_rules():
ri.iptables_manager.ipv4['nat'].add_rule(c, r)
ri.iptables_manager.apply()
super(L3NATAgent, self).process_router_add(ri)
if self.conf.enable_metadata_proxy:
self._spawn_metadata_proxy(ri.router_id, ri.ns_name)
def _router_removed(self, router_id):
ri = self.router_info.get(router_id)
if ri is None:
LOG.warn(_("Info for router %s were not found. "
"Skipping router removal"), router_id)
return
ri.router['gw_port'] = None
ri.router[l3_constants.INTERFACE_KEY] = []
ri.router[l3_constants.FLOATINGIP_KEY] = []
self.process_router(ri)
for c, r in self.metadata_filter_rules():
ri.iptables_manager.ipv4['filter'].remove_rule(c, r)
for c, r in self.metadata_nat_rules():
ri.iptables_manager.ipv4['nat'].remove_rule(c, r)
ri.iptables_manager.apply()
if self.conf.enable_metadata_proxy:
self._destroy_metadata_proxy(ri.router_id, ri.ns_name)
del self.router_info[router_id]
self._destroy_router_namespace(ri.ns_name)
def _spawn_metadata_proxy(self, router_id, ns_name):
def callback(pid_file):
metadata_proxy_socket = cfg.CONF.metadata_proxy_socket
proxy_cmd = ['neutron-ns-metadata-proxy',
'--pid_file=%s' % pid_file,
'--metadata_proxy_socket=%s' % metadata_proxy_socket,
'--router_id=%s' % router_id,
'--state_path=%s' % self.conf.state_path,
'--metadata_port=%s' % self.conf.metadata_port]
proxy_cmd.extend(config.get_log_args(
cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' %
router_id))
return proxy_cmd
pm = external_process.ProcessManager(
self.conf,
router_id,
self.root_helper,
ns_name)
pm.enable(callback)
def _destroy_metadata_proxy(self, router_id, ns_name):
pm = external_process.ProcessManager(
self.conf,
router_id,
self.root_helper,
ns_name)
pm.disable()
def _set_subnet_info(self, port):
ips = port['fixed_ips']
if not ips:
raise Exception(_("Router port %s has no IP address") % port['id'])
if len(ips) > 1:
LOG.error(_("Ignoring multiple IPs on router port %s"),
port['id'])
prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
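        # e.g. (illustrative): fixed_ips [{'ip_address': '10.0.0.1'}] with
        # subnet cidr '10.0.0.0/24' yields port['ip_cidr'] = '10.0.0.1/24'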
def _get_existing_devices(self, ri):
ip_wrapper = ip_lib.IPWrapper(root_helper=self.root_helper,
namespace=ri.ns_name)
ip_devs = ip_wrapper.get_devices(exclude_loopback=True)
return [ip_dev.name for ip_dev in ip_devs]
def process_router(self, ri):
ri.iptables_manager.defer_apply_on()
ex_gw_port = self._get_ex_gw_port(ri)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
existing_port_ids = set([p['id'] for p in ri.internal_ports])
current_port_ids = set([p['id'] for p in internal_ports
if p['admin_state_up']])
new_ports = [p for p in internal_ports if
p['id'] in current_port_ids and
p['id'] not in existing_port_ids]
old_ports = [p for p in ri.internal_ports if
p['id'] not in current_port_ids]
for p in new_ports:
self._set_subnet_info(p)
self.internal_network_added(ri, p['network_id'], p['id'],
p['ip_cidr'], p['mac_address'])
ri.internal_ports.append(p)
for p in old_ports:
self.internal_network_removed(ri, p['id'], p['ip_cidr'])
ri.internal_ports.remove(p)
existing_devices = self._get_existing_devices(ri)
current_internal_devs = set([n for n in existing_devices
if n.startswith(INTERNAL_DEV_PREFIX)])
current_port_devs = set([self.get_internal_device_name(id) for
id in current_port_ids])
stale_devs = current_internal_devs - current_port_devs
for stale_dev in stale_devs:
LOG.debug(_('Deleting stale internal router device: %s'),
stale_dev)
self.driver.unplug(stale_dev,
namespace=ri.ns_name,
prefix=INTERNAL_DEV_PREFIX)
# Get IPv4 only internal CIDRs
internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports
if netaddr.IPNetwork(p['ip_cidr']).version == 4]
# TODO(salv-orlando): RouterInfo would be a better place for
# this logic too
ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
ri.ex_gw_port and ri.ex_gw_port['id'])
interface_name = None
if ex_gw_port_id:
interface_name = self.get_external_device_name(ex_gw_port_id)
if ex_gw_port and ex_gw_port != ri.ex_gw_port:
self._set_subnet_info(ex_gw_port)
self.external_gateway_added(ri, ex_gw_port,
interface_name, internal_cidrs)
elif not ex_gw_port and ri.ex_gw_port:
self.external_gateway_removed(ri, ri.ex_gw_port,
interface_name, internal_cidrs)
stale_devs = [dev for dev in existing_devices
if dev.startswith(EXTERNAL_DEV_PREFIX)
and dev != interface_name]
for stale_dev in stale_devs:
LOG.debug(_('Deleting stale external router device: %s'),
stale_dev)
self.driver.unplug(stale_dev,
bridge=self.conf.external_network_bridge,
namespace=ri.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
# Process static routes for router
self.routes_updated(ri)
# Process SNAT rules for external gateway
ri.perform_snat_action(self._handle_router_snat_rules,
internal_cidrs, interface_name)
# Process SNAT/DNAT rules for floating IPs
fip_statuses = {}
try:
if ex_gw_port:
existing_floating_ips = ri.floating_ips
self.process_router_floating_ip_nat_rules(ri)
ri.iptables_manager.defer_apply_off()
# Once NAT rules for floating IPs are safely in place
# configure their addresses on the external gateway port
fip_statuses = self.process_router_floating_ip_addresses(
ri, ex_gw_port)
except Exception:
# TODO(salv-orlando): Less broad catching
# All floating IPs must be put in error state
for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []):
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
if ex_gw_port:
# Identify floating IPs which were disabled
ri.floating_ips = set(fip_statuses.keys())
for fip_id in existing_floating_ips - ri.floating_ips:
fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN
# Update floating IP status on the neutron server
self.plugin_rpc.update_floatingip_statuses(
self.context, ri.router_id, fip_statuses)
# Update ex_gw_port and enable_snat on the router info cache
ri.ex_gw_port = ex_gw_port
ri.enable_snat = ri.router.get('enable_snat')
def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs,
interface_name, action):
        # Remove all the rules.
        # This is safe because if use_namespaces is set to False
        # the agent can only configure one router; otherwise each
        # router's SNAT rules will be in its own namespace.
ri.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
ri.iptables_manager.ipv4['nat'].empty_chain('snat')
# Add back the jump to float-snat
ri.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
        # And add them back if the action is add_rules.
if action == 'add_rules' and ex_gw_port:
# ex_gw_port should not be None in this case
# NAT rules are added only if ex_gw_port has an IPv4 address
for ip_addr in ex_gw_port['fixed_ips']:
ex_gw_ip = ip_addr['ip_address']
if netaddr.IPAddress(ex_gw_ip).version == 4:
rules = self.external_gateway_nat_rules(ex_gw_ip,
internal_cidrs,
interface_name)
for rule in rules:
ri.iptables_manager.ipv4['nat'].add_rule(*rule)
break
ri.iptables_manager.apply()
def process_router_floating_ip_nat_rules(self, ri):
"""Configure NAT rules for the router's floating IPs.
        Configures iptables rules for the floating IPs of the given router.
"""
# Clear out all iptables rules for floating ips
ri.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
# Loop once to ensure that floating ips are configured.
for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []):
# Rebuild iptables rules for the floating ip.
fixed = fip['fixed_ip_address']
fip_ip = fip['floating_ip_address']
for chain, rule in self.floating_forward_rules(fip_ip, fixed):
ri.iptables_manager.ipv4['nat'].add_rule(chain, rule,
tag='floating_ip')
ri.iptables_manager.apply()
def process_router_floating_ip_addresses(self, ri, ex_gw_port):
"""Configure IP addresses on router's external gateway interface.
Ensures addresses for existing floating IPs and cleans up
        those that should no longer be configured.
"""
fip_statuses = {}
interface_name = self.get_external_device_name(ex_gw_port['id'])
device = ip_lib.IPDevice(interface_name, self.root_helper,
namespace=ri.ns_name)
existing_cidrs = set([addr['cidr'] for addr in device.addr.list()])
new_cidrs = set()
# Loop once to ensure that floating ips are configured.
for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []):
fip_ip = fip['floating_ip_address']
ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX
new_cidrs.add(ip_cidr)
if ip_cidr not in existing_cidrs:
net = netaddr.IPNetwork(ip_cidr)
try:
device.addr.add(net.version, ip_cidr, str(net.broadcast))
except (processutils.UnknownArgumentError,
processutils.ProcessExecutionError):
# any exception occurred here should cause the floating IP
# to be set in error state
fip_statuses[fip['id']] = (
l3_constants.FLOATINGIP_STATUS_ERROR)
LOG.warn(_("Unable to configure IP address for "
"floating IP: %s"), fip['id'])
continue
# As GARP is processed in a distinct thread the call below
# won't raise an exception to be handled.
self._send_gratuitous_arp_packet(
ri, interface_name, fip_ip)
fip_statuses[fip['id']] = (
l3_constants.FLOATINGIP_STATUS_ACTIVE)
# Clean up addresses that no longer belong on the gateway interface.
for ip_cidr in existing_cidrs - new_cidrs:
if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX):
net = netaddr.IPNetwork(ip_cidr)
device.addr.delete(net.version, ip_cidr)
return fip_statuses
def _get_ex_gw_port(self, ri):
return ri.router.get('gw_port')
def _arping(self, ri, interface_name, ip_address):
arping_cmd = ['arping', '-A',
'-I', interface_name,
'-c', self.conf.send_arp_for_ha,
ip_address]
try:
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ri.ns_name)
ip_wrapper.netns.execute(arping_cmd, check_exit_code=True)
except Exception as e:
LOG.error(_("Failed sending gratuitous ARP: %s"), str(e))
def _send_gratuitous_arp_packet(self, ri, interface_name, ip_address):
if self.conf.send_arp_for_ha > 0:
eventlet.spawn_n(self._arping, ri, interface_name, ip_address)
def get_internal_device_name(self, port_id):
return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def external_gateway_added(self, ri, ex_gw_port,
interface_name, internal_cidrs):
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'], interface_name,
ex_gw_port['mac_address'],
bridge=self.conf.external_network_bridge,
namespace=ri.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
# Compute a list of addresses this router is supposed to have.
# This avoids unnecessarily removing those addresses and
        # causing a momentary network outage.
floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, [])
preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX
for ip in floating_ips]
self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']],
namespace=ri.ns_name,
gateway=ex_gw_port['subnet'].get('gateway_ip'),
extra_subnets=ex_gw_port.get('extra_subnets', []),
preserve_ips=preserve_ips)
ip_address = ex_gw_port['ip_cidr'].split('/')[0]
self._send_gratuitous_arp_packet(ri, interface_name, ip_address)
def external_gateway_removed(self, ri, ex_gw_port,
interface_name, internal_cidrs):
self.driver.unplug(interface_name,
bridge=self.conf.external_network_bridge,
namespace=ri.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
def metadata_filter_rules(self):
rules = []
if self.conf.enable_metadata_proxy:
rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 '
'-p tcp -m tcp --dport %s '
'-j ACCEPT' % self.conf.metadata_port))
return rules
def metadata_nat_rules(self):
rules = []
if self.conf.enable_metadata_proxy:
rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j REDIRECT '
'--to-port %s' % self.conf.metadata_port))
return rules
def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs,
interface_name):
rules = [('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})]
for cidr in internal_cidrs:
rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr))
return rules
def internal_network_added(self, ri, network_id, port_id,
internal_cidr, mac_address):
interface_name = self.get_internal_device_name(port_id)
if not ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name):
self.driver.plug(network_id, port_id, interface_name, mac_address,
namespace=ri.ns_name,
prefix=INTERNAL_DEV_PREFIX)
self.driver.init_l3(interface_name, [internal_cidr],
namespace=ri.ns_name)
ip_address = internal_cidr.split('/')[0]
self._send_gratuitous_arp_packet(ri, interface_name, ip_address)
def internal_network_removed(self, ri, port_id, internal_cidr):
interface_name = self.get_internal_device_name(port_id)
if ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name):
self.driver.unplug(interface_name, namespace=ri.ns_name,
prefix=INTERNAL_DEV_PREFIX)
def internal_network_nat_rules(self, ex_gw_ip, internal_cidr):
rules = [('snat', '-s %s -j SNAT --to-source %s' %
(internal_cidr, ex_gw_ip))]
return rules
def floating_forward_rules(self, floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('float-snat', '-s %s -j SNAT --to %s' %
(fixed_ip, floating_ip))]
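    # For example (illustrative addresses), floating_forward_rules(
    # '203.0.113.5', '10.0.0.3') returns:
    #
    #     ('PREROUTING', '-d 203.0.113.5 -j DNAT --to 10.0.0.3')
    #     ('OUTPUT', '-d 203.0.113.5 -j DNAT --to 10.0.0.3')
    #     ('float-snat', '-s 10.0.0.3 -j SNAT --to 203.0.113.5')
    #
    # while internal_network_nat_rules('203.0.113.2', '10.0.0.0/24')
    # returns ('snat', '-s 10.0.0.0/24 -j SNAT --to-source 203.0.113.2').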
def router_deleted(self, context, router_id):
"""Deal with router deletion RPC message."""
LOG.debug(_('Got router deleted notification for %s'), router_id)
self.removed_routers.add(router_id)
def routers_updated(self, context, routers):
"""Deal with routers modification and creation RPC message."""
LOG.debug(_('Got routers updated notification :%s'), routers)
if routers:
# This is needed for backward compatibility
if isinstance(routers[0], dict):
routers = [router['id'] for router in routers]
self.updated_routers.update(routers)
def router_removed_from_agent(self, context, payload):
LOG.debug(_('Got router removed from agent :%r'), payload)
self.removed_routers.add(payload['router_id'])
def router_added_to_agent(self, context, payload):
LOG.debug(_('Got router added to agent :%r'), payload)
self.routers_updated(context, payload)
def _process_routers(self, routers, all_routers=False):
pool = eventlet.GreenPool()
if (self.conf.external_network_bridge and
not ip_lib.device_exists(self.conf.external_network_bridge)):
LOG.error(_("The external network bridge '%s' does not exist"),
self.conf.external_network_bridge)
return
target_ex_net_id = self._fetch_external_net_id()
        # If 'routers' contains all the routers we have (from the router
        # sync at startup or after a runtime error), look for routers
        # that should be removed. If 'routers' came from a server-side
        # notification, only consider the intersection of the incoming
        # routers and the ones we currently track.
if all_routers:
prev_router_ids = set(self.router_info)
else:
prev_router_ids = set(self.router_info) & set(
[router['id'] for router in routers])
cur_router_ids = set()
for r in routers:
# If namespaces are disabled, only process the router associated
# with the configured agent id.
if (not self.conf.use_namespaces and
r['id'] != self.conf.router_id):
continue
ex_net_id = (r['external_gateway_info'] or {}).get('network_id')
if not ex_net_id and not self.conf.handle_internal_only_routers:
continue
if (target_ex_net_id and ex_net_id and
ex_net_id != target_ex_net_id):
# Double check that our single external_net_id has not changed
# by forcing a check by RPC.
if (ex_net_id != self._fetch_external_net_id(force=True)):
continue
cur_router_ids.add(r['id'])
if r['id'] not in self.router_info:
self._router_added(r['id'], r)
ri = self.router_info[r['id']]
ri.router = r
pool.spawn_n(self.process_router, ri)
# identify and remove routers that no longer exist
for router_id in prev_router_ids - cur_router_ids:
pool.spawn_n(self._router_removed, router_id)
pool.waitall()
@lockutils.synchronized('l3-agent', 'neutron-')
def _rpc_loop(self):
        # _rpc_loop and _sync_routers_task will not run at the same time
        # because of the lock, so it is safe to clear updated_routers and
        # removed_routers here; they may still be repopulated by the
        # corresponding RPC notifications while this loop runs.
try:
LOG.debug(_("Starting RPC loop for %d updated routers"),
len(self.updated_routers))
if self.updated_routers:
# We're capturing and clearing the list, and will
# process the "captured" updates in this loop,
# and any updates that happen due to a context switch
# will be picked up on the next pass.
updated_routers = set(self.updated_routers)
self.updated_routers.clear()
router_ids = list(updated_routers)
routers = self.plugin_rpc.get_routers(
self.context, router_ids)
                # routers with admin_state_up=false will not be in the fetched list
fetched = set([r['id'] for r in routers])
self.removed_routers.update(updated_routers - fetched)
self._process_routers(routers)
self._process_router_delete()
LOG.debug(_("RPC loop successfully completed"))
except Exception:
LOG.exception(_("Failed synchronizing routers"))
self.fullsync = True
def _process_router_delete(self):
current_removed_routers = list(self.removed_routers)
for router_id in current_removed_routers:
self._router_removed(router_id)
self.removed_routers.remove(router_id)
def _router_ids(self):
if not self.conf.use_namespaces:
return [self.conf.router_id]
@periodic_task.periodic_task
@lockutils.synchronized('l3-agent', 'neutron-')
def _sync_routers_task(self, context):
if self.services_sync:
super(L3NATAgent, self).process_services_sync(context)
LOG.debug(_("Starting _sync_routers_task - fullsync:%s"),
self.fullsync)
if not self.fullsync:
return
try:
router_ids = self._router_ids()
self.updated_routers.clear()
self.removed_routers.clear()
routers = self.plugin_rpc.get_routers(
context, router_ids)
LOG.debug(_('Processing :%r'), routers)
self._process_routers(routers, all_routers=True)
self.fullsync = False
LOG.debug(_("_sync_routers_task successfully completed"))
except rpc_compat.RPCException:
LOG.exception(_("Failed synchronizing routers due to RPC error"))
self.fullsync = True
return
except Exception:
LOG.exception(_("Failed synchronizing routers"))
self.fullsync = True
# Resync is not necessary for the cleanup of stale
# namespaces.
if self._clean_stale_namespaces:
self._cleanup_namespaces(routers)
def after_start(self):
LOG.info(_("L3 agent started"))
def _update_routing_table(self, ri, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ri.ns_name)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def routes_updated(self, ri):
new_routes = ri.router['routes']
old_routes = ri.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug(_("Added route entry is '%s'"), route)
            # remove replaced routes from the delete list
            for del_route in removes:
                if route['destination'] == del_route['destination']:
                    removes.remove(del_route)
            # 'replace' succeeds even if there is no existing route
            self._update_routing_table(ri, 'replace', route)
for route in removes:
LOG.debug(_("Removed route entry is '%s'"), route)
self._update_routing_table(ri, 'delete', route)
ri.routes = new_routes
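    # Worked example for routes_updated (illustrative values): with
    # old_routes = [{'destination': '0.0.0.0/0', 'nexthop': '10.0.0.1'}]
    # and new_routes = [{'destination': '0.0.0.0/0', 'nexthop': '10.0.0.9'}],
    # the route is applied via 'ip route replace' and the matching stale
    # entry is dropped from removes, so no 'ip route delete' is issued
    # for the shared destination.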
class L3NATAgentWithStateReport(L3NATAgent):
def __init__(self, host, conf=None):
super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-l3-agent',
'host': host,
'topic': topics.L3_AGENT,
'configurations': {
'use_namespaces': self.conf.use_namespaces,
'router_id': self.conf.router_id,
'handle_internal_only_routers':
self.conf.handle_internal_only_routers,
'external_network_bridge': self.conf.external_network_bridge,
'gateway_external_network_id':
self.conf.gateway_external_network_id,
'interface_driver': self.conf.interface_driver},
'start_flag': True,
'agent_type': l3_constants.AGENT_TYPE_L3}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
LOG.debug(_("Report state task started"))
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
router_infos = self.router_info.values()
num_routers = len(router_infos)
for ri in router_infos:
ex_gw_port = self._get_ex_gw_port(ri)
if ex_gw_port:
num_ex_gw_ports += 1
num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY,
[]))
num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY,
[]))
configurations = self.agent_state['configurations']
configurations['routers'] = num_routers
configurations['ex_gw_ports'] = num_ex_gw_ports
configurations['interfaces'] = num_interfaces
configurations['floating_ips'] = num_floating_ips
try:
self.state_rpc.report_state(self.context, self.agent_state,
self.use_call)
self.agent_state.pop('start_flag', None)
self.use_call = False
LOG.debug(_("Report state task successfully completed"))
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.fullsync = True
LOG.info(_("agent_updated by server side %s!"), payload)
def main(manager='neutron.agent.l3_agent.L3NATAgentWithStateReport'):
conf = cfg.CONF
conf.register_opts(L3NATAgent.OPTS)
config.register_interface_driver_opts_helper(conf)
config.register_use_namespaces_opts_helper(conf)
config.register_agent_state_opts_helper(conf)
config.register_root_helper(conf)
conf.register_opts(interface.OPTS)
conf.register_opts(external_process.OPTS)
common_config.init(sys.argv[1:])
config.setup_logging(conf)
server = neutron_service.Service.create(
binary='neutron-l3-agent',
topic=topics.L3_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager=manager)
service.launch(server).wait()


@@ -1,908 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import os
import re
import shutil
import socket
import sys
import netaddr
from oslo.config import cfg
import six
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import utils as commonutils
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('dhcp_confs',
default='$state_path/dhcp',
help=_('Location to store DHCP server config files')),
cfg.StrOpt('dhcp_domain',
default='openstacklocal',
help=_('Domain to use for building the hostnames')),
cfg.StrOpt('dnsmasq_config_file',
default='',
help=_('Override the default dnsmasq settings with this file')),
cfg.ListOpt('dnsmasq_dns_servers',
help=_('Comma-separated list of the DNS servers which will be '
'used as forwarders.'),
deprecated_name='dnsmasq_dns_server'),
cfg.BoolOpt('dhcp_delete_namespaces', default=False,
help=_("Delete namespace after removing a dhcp server.")),
cfg.IntOpt(
'dnsmasq_lease_max',
default=(2 ** 24),
help=_('Limit number of leases to prevent a denial-of-service.')),
]
IPV4 = 4
IPV6 = 6
UDP = 'udp'
TCP = 'tcp'
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
WIN2k3_STATIC_DNS = 249
NS_PREFIX = 'qdhcp-'
class DictModel(dict):
"""Convert dict into an object that provides attribute access to values."""
def __init__(self, *args, **kwargs):
"""Convert dict values to DictModel values."""
super(DictModel, self).__init__(*args, **kwargs)
def needs_upgrade(item):
"""Check if `item` is a dict and needs to be changed to DictModel.
"""
return isinstance(item, dict) and not isinstance(item, DictModel)
def upgrade(item):
"""Upgrade item if it needs to be upgraded."""
if needs_upgrade(item):
return DictModel(item)
else:
return item
for key, value in self.iteritems():
if isinstance(value, (list, tuple)):
# Keep the same type but convert dicts to DictModels
self[key] = type(value)(
(upgrade(item) for item in value)
)
elif needs_upgrade(value):
# Change dict instance values to DictModel instance values
self[key] = DictModel(value)
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
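# Example (illustrative only): DictModel exposes nested dict data as
# attributes, recursively upgrading plain dicts, including inside lists:
#   net = DictModel({'id': 'net-1', 'subnets': [{'cidr': '10.0.0.0/24'}]})
#   net.id               # 'net-1'
#   net.subnets[0].cidr  # '10.0.0.0/24'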
class NetModel(DictModel):
def __init__(self, use_namespaces, d):
super(NetModel, self).__init__(d)
self._ns_name = (use_namespaces and
"%s%s" % (NS_PREFIX, self.id) or None)
@property
def namespace(self):
return self._ns_name
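# e.g. NetModel(True, {'id': 'abc'}).namespace == 'qdhcp-abc', while
# NetModel(False, {'id': 'abc'}).namespace is None.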
@six.add_metaclass(abc.ABCMeta)
class DhcpBase(object):
def __init__(self, conf, network, root_helper='sudo',
version=None, plugin=None):
self.conf = conf
self.network = network
self.root_helper = root_helper
self.device_manager = DeviceManager(self.conf,
self.root_helper, plugin)
self.version = version
@abc.abstractmethod
def enable(self):
"""Enables DHCP for this network."""
@abc.abstractmethod
def disable(self, retain_port=False):
"""Disable dhcp for this network."""
def restart(self):
"""Restart the dhcp service for the network."""
self.disable(retain_port=True)
self.enable()
@abc.abstractproperty
def active(self):
"""Boolean representing the running state of the DHCP server."""
@abc.abstractmethod
def reload_allocations(self):
"""Force the DHCP server to reload the assignment database."""
@classmethod
def existing_dhcp_networks(cls, conf, root_helper):
"""Return a list of existing networks ids that we have configs for."""
raise NotImplementedError
@classmethod
def check_version(cls):
"""Execute version checks on DHCP server."""
raise NotImplementedError
class DhcpLocalProcess(DhcpBase):
PORTS = []
def _enable_dhcp(self):
"""check if there is a subnet within the network with dhcp enabled."""
for subnet in self.network.subnets:
if subnet.enable_dhcp:
return True
return False
def enable(self):
"""Enables DHCP for this network by spawning a local process."""
interface_name = self.device_manager.setup(self.network)
if self.active:
self.restart()
elif self._enable_dhcp():
self.interface_name = interface_name
self.spawn_process()
def disable(self, retain_port=False):
"""Disable DHCP for this network by killing the local process."""
pid = self.pid
if pid:
if self.active:
cmd = ['kill', '-9', pid]
utils.execute(cmd, self.root_helper)
else:
LOG.debug(_('DHCP for %(net_id)s is stale, pid %(pid)d '
'does not exist, performing cleanup'),
{'net_id': self.network.id, 'pid': pid})
if not retain_port:
self.device_manager.destroy(self.network,
self.interface_name)
else:
LOG.debug(_('No DHCP started for %s'), self.network.id)
self._remove_config_files()
if not retain_port:
if self.conf.dhcp_delete_namespaces and self.network.namespace:
ns_ip = ip_lib.IPWrapper(self.root_helper,
self.network.namespace)
try:
ns_ip.netns.delete(self.network.namespace)
except RuntimeError:
msg = _('Failed trying to delete namespace: %s')
LOG.exception(msg, self.network.namespace)
def _remove_config_files(self):
confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
conf_dir = os.path.join(confs_dir, self.network.id)
shutil.rmtree(conf_dir, ignore_errors=True)
def get_conf_file_name(self, kind, ensure_conf_dir=False):
"""Returns the file name for a given kind of config file."""
confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
conf_dir = os.path.join(confs_dir, self.network.id)
if ensure_conf_dir:
if not os.path.isdir(conf_dir):
os.makedirs(conf_dir, 0o755)
return os.path.join(conf_dir, kind)
def _get_value_from_conf_file(self, kind, converter=None):
"""A helper function to read a value from one of the state files."""
file_name = self.get_conf_file_name(kind)
msg = _('Error while reading %s')
try:
with open(file_name, 'r') as f:
try:
return converter and converter(f.read()) or f.read()
except ValueError:
msg = _('Unable to convert value in %s')
except IOError:
msg = _('Unable to access %s')
LOG.debug(msg % file_name)
return None
@property
def pid(self):
"""Last known pid for the DHCP process spawned for this network."""
return self._get_value_from_conf_file('pid', int)
@property
def active(self):
pid = self.pid
if pid is None:
return False
cmdline = '/proc/%s/cmdline' % pid
try:
with open(cmdline, "r") as f:
return self.network.id in f.readline()
except IOError:
return False
@property
def interface_name(self):
return self._get_value_from_conf_file('interface')
@interface_name.setter
def interface_name(self, value):
interface_file_path = self.get_conf_file_name('interface',
ensure_conf_dir=True)
utils.replace_file(interface_file_path, value)
@abc.abstractmethod
def spawn_process(self):
pass
class Dnsmasq(DhcpLocalProcess):
# The ports that need to be opened when security policies are active
# on the Neutron port used for DHCP. These are provided as a convenience
# for users of this class.
PORTS = {IPV4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
IPV6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
}
_TAG_PREFIX = 'tag%d'
NEUTRON_NETWORK_ID_KEY = 'NEUTRON_NETWORK_ID'
NEUTRON_RELAY_SOCKET_PATH_KEY = 'NEUTRON_RELAY_SOCKET_PATH'
MINIMUM_VERSION = 2.59
@classmethod
def check_version(cls):
ver = 0
try:
cmd = ['dnsmasq', '--version']
out = utils.execute(cmd)
ver = re.findall(r"\d+\.\d+", out)[0]
is_valid_version = float(ver) >= cls.MINIMUM_VERSION
if not is_valid_version:
LOG.warning(_('FAILED VERSION REQUIREMENT FOR DNSMASQ. '
'DHCP AGENT MAY NOT RUN CORRECTLY! '
'Please ensure that its version is %s '
'or above!'), cls.MINIMUM_VERSION)
except (OSError, RuntimeError, IndexError, ValueError):
LOG.warning(_('Unable to determine dnsmasq version. '
'Please ensure that its version is %s '
'or above!'), cls.MINIMUM_VERSION)
return float(ver)
@classmethod
def existing_dhcp_networks(cls, conf, root_helper):
"""Return a list of existing networks ids that we have configs for."""
confs_dir = os.path.abspath(os.path.normpath(conf.dhcp_confs))
return [
c for c in os.listdir(confs_dir)
if uuidutils.is_uuid_like(c)
]
def spawn_process(self):
"""Spawns a Dnsmasq process for the network."""
env = {
self.NEUTRON_NETWORK_ID_KEY: self.network.id,
}
cmd = [
'dnsmasq',
'--no-hosts',
'--no-resolv',
'--strict-order',
'--bind-interfaces',
'--interface=%s' % self.interface_name,
'--except-interface=lo',
'--pid-file=%s' % self.get_conf_file_name(
'pid', ensure_conf_dir=True),
'--dhcp-hostsfile=%s' % self._output_hosts_file(),
'--addn-hosts=%s' % self._output_addn_hosts_file(),
'--dhcp-optsfile=%s' % self._output_opts_file(),
'--leasefile-ro',
]
possible_leases = 0
for i, subnet in enumerate(self.network.subnets):
# if a subnet is specified to have dhcp disabled
if not subnet.enable_dhcp:
continue
if subnet.ip_version == 4:
mode = 'static'
else:
# TODO(mark): how do we indicate other options
# ra-only, slaac, ra-nameservers, and ra-stateless.
mode = 'static'
if self.version >= self.MINIMUM_VERSION:
set_tag = 'set:'
else:
set_tag = ''
cidr = netaddr.IPNetwork(subnet.cidr)
if self.conf.dhcp_lease_duration == -1:
lease = 'infinite'
else:
lease = '%ss' % self.conf.dhcp_lease_duration
cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
(set_tag, self._TAG_PREFIX % i,
cidr.network, mode, lease))
possible_leases += cidr.size
# Cap the limit because creating lots of subnets can inflate
# this possible lease cap.
cmd.append('--dhcp-lease-max=%d' %
min(possible_leases, self.conf.dnsmasq_lease_max))
cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
if self.conf.dnsmasq_dns_servers:
cmd.extend(
'--server=%s' % server
for server in self.conf.dnsmasq_dns_servers)
if self.conf.dhcp_domain:
cmd.append('--domain=%s' % self.conf.dhcp_domain)
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
self.network.namespace)
ip_wrapper.netns.execute(cmd, addl_env=env)
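# Roughly, for a single 10.0.0.0/24 subnet with a 120s lease, the command
# built above looks like (illustrative values, some options elided):
#   dnsmasq --no-hosts --no-resolv --strict-order --bind-interfaces
#       --interface=tap0 --except-interface=lo --pid-file=... --leasefile-ro
#       --dhcp-range=set:tag0,10.0.0.0,static,120s --dhcp-lease-max=256 ...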
def _release_lease(self, mac_address, ip):
"""Release a DHCP lease."""
cmd = ['dhcp_release', self.interface_name, ip, mac_address]
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
self.network.namespace)
ip_wrapper.netns.execute(cmd)
def reload_allocations(self):
"""Rebuild the dnsmasq config and signal the dnsmasq to reload."""
# If all subnets turn off dhcp, kill the process.
if not self._enable_dhcp():
self.disable()
LOG.debug(_('Killing dnsmasq for network since all subnets have '
'turned off DHCP: %s'), self.network.id)
return
self._release_unused_leases()
self._output_hosts_file()
self._output_addn_hosts_file()
self._output_opts_file()
if self.active:
cmd = ['kill', '-HUP', self.pid]
utils.execute(cmd, self.root_helper)
else:
LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), self.pid)
LOG.debug(_('Reloading allocations for network: %s'), self.network.id)
self.device_manager.update(self.network, self.interface_name)
def _iter_hosts(self):
"""Iterate over hosts.
For each host on the network we yield a tuple containing:
(
port, # a DictModel instance representing the port.
alloc, # a DictModel instance of the allocated ip and subnet.
host_name, # Host name.
name, # Host name and domain name in the format 'hostname.domain'.
)
"""
for port in self.network.ports:
for alloc in port.fixed_ips:
hostname = 'host-%s' % alloc.ip_address.replace(
'.', '-').replace(':', '-')
fqdn = '%s.%s' % (hostname, self.conf.dhcp_domain)
yield (port, alloc, hostname, fqdn)
def _output_hosts_file(self):
"""Writes a dnsmasq compatible dhcp hosts file.
The generated file is sent to the --dhcp-hostsfile option of dnsmasq,
and lists the hosts on the network which should receive a dhcp lease.
Each line in this file is in the form::
'mac_address,FQDN,ip_address'
IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in
this file if it did not give a lease to a host listed in it (e.g.:
multiple dnsmasq instances on the same network if this network is on
multiple network nodes). This file is only defining hosts which
should receive a dhcp lease, the hosts resolution in itself is
defined by the `_output_addn_hosts_file` method.
"""
buf = six.StringIO()
filename = self.get_conf_file_name('host')
LOG.debug(_('Building host file: %s'), filename)
for (port, alloc, hostname, name) in self._iter_hosts():
set_tag = ''
# (dzyu) Check if it is legal ipv6 address, if so, need wrap
# it with '[]' to let dnsmasq to distinguish MAC address from
# IPv6 address.
ip_address = alloc.ip_address
if netaddr.valid_ipv6(ip_address):
ip_address = '[%s]' % ip_address
LOG.debug(_('Adding %(mac)s : %(name)s : %(ip)s'),
{"mac": port.mac_address, "name": name,
"ip": ip_address})
if getattr(port, 'extra_dhcp_opts', False):
if self.version >= self.MINIMUM_VERSION:
set_tag = 'set:'
buf.write('%s,%s,%s,%s%s\n' %
(port.mac_address, name, ip_address,
set_tag, port.id))
else:
buf.write('%s,%s,%s\n' %
(port.mac_address, name, ip_address))
utils.replace_file(filename, buf.getvalue())
LOG.debug(_('Done building host file %s'), filename)
return filename
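# Sample generated lines (illustrative):
#   fa:16:3e:aa:bb:cc,host-10-0-0-5.openstacklocal,10.0.0.5
# and, for a port with extra_dhcp_opts on dnsmasq >= 2.59:
#   fa:16:3e:aa:bb:cc,host-10-0-0-5.openstacklocal,10.0.0.5,set:<port-id>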
def _read_hosts_file_leases(self, filename):
leases = set()
if os.path.exists(filename):
with open(filename) as f:
for line in f:
host = line.strip().split(',')
leases.add((host[2], host[0]))
return leases
def _release_unused_leases(self):
filename = self.get_conf_file_name('host')
old_leases = self._read_hosts_file_leases(filename)
new_leases = set()
for port in self.network.ports:
for alloc in port.fixed_ips:
new_leases.add((alloc.ip_address, port.mac_address))
for ip, mac in old_leases - new_leases:
self._release_lease(mac, ip)
def _output_addn_hosts_file(self):
"""Writes a dnsmasq compatible additional hosts file.
The generated file is sent to the --addn-hosts option of dnsmasq,
and lists the hosts on the network which should be resolved even if
the dnsmaq instance did not give a lease to the host (see the
`_output_hosts_file` method).
Each line in this file is in the same form as a standard /etc/hosts
file.
"""
buf = six.StringIO()
for (port, alloc, hostname, fqdn) in self._iter_hosts():
# It is compulsory to write the `fqdn` before the `hostname` in
# order to obtain it in PTR responses.
buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
addn_hosts = self.get_conf_file_name('addn_hosts')
utils.replace_file(addn_hosts, buf.getvalue())
return addn_hosts
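# Sample addn_hosts line (illustrative):
#   10.0.0.5	host-10-0-0-5.openstacklocal host-10-0-0-5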
def _output_opts_file(self):
"""Write a dnsmasq compatible options file."""
if self.conf.enable_isolated_metadata:
subnet_to_interface_ip = self._make_subnet_interface_ip_map()
options = []
dhcp_ips = collections.defaultdict(list)
subnet_idx_map = {}
for i, subnet in enumerate(self.network.subnets):
if not subnet.enable_dhcp:
continue
if subnet.dns_nameservers:
options.append(
self._format_option(i, 'dns-server',
','.join(subnet.dns_nameservers)))
else:
# use the dnsmasq IPs as nameservers only if there is no
# dns-server submitted by the server
subnet_idx_map[subnet.id] = i
gateway = subnet.gateway_ip
host_routes = []
for hr in subnet.host_routes:
if hr.destination == "0.0.0.0/0":
if not gateway:
gateway = hr.nexthop
else:
host_routes.append("%s,%s" % (hr.destination, hr.nexthop))
# Add host routes for isolated network segments
if self._enable_metadata(subnet):
subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
host_routes.append(
'%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
)
if host_routes:
if gateway and subnet.ip_version == 4:
host_routes.append("%s,%s" % ("0.0.0.0/0", gateway))
options.append(
self._format_option(i, 'classless-static-route',
','.join(host_routes)))
options.append(
self._format_option(i, WIN2k3_STATIC_DNS,
','.join(host_routes)))
if subnet.ip_version == 4:
if gateway:
options.append(self._format_option(i, 'router', gateway))
else:
options.append(self._format_option(i, 'router'))
for port in self.network.ports:
if getattr(port, 'extra_dhcp_opts', False):
options.extend(
self._format_option(port.id, opt.opt_name, opt.opt_value)
for opt in port.extra_dhcp_opts)
# provide all of the dnsmasq IPs as dns-servers if there is
# more than one dnsmasq serving a subnet and no dns-server was
# submitted by the server
if port.device_owner == constants.DEVICE_OWNER_DHCP:
for ip in port.fixed_ips:
i = subnet_idx_map.get(ip.subnet_id)
if i is None:
continue
dhcp_ips[i].append(ip.ip_address)
for i, ips in dhcp_ips.items():
if len(ips) > 1:
options.append(self._format_option(i,
'dns-server',
','.join(ips)))
name = self.get_conf_file_name('opts')
utils.replace_file(name, '\n'.join(options))
return name
def _make_subnet_interface_ip_map(self):
ip_dev = ip_lib.IPDevice(
self.interface_name,
self.root_helper,
self.network.namespace
)
subnet_lookup = dict(
(netaddr.IPNetwork(subnet.cidr), subnet.id)
for subnet in self.network.subnets
)
retval = {}
for addr in ip_dev.addr.list():
ip_net = netaddr.IPNetwork(addr['cidr'])
if ip_net in subnet_lookup:
retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]
return retval
def _format_option(self, tag, option, *args):
"""Format DHCP option by option name or code."""
if self.version >= self.MINIMUM_VERSION:
set_tag = 'tag:'
else:
set_tag = ''
option = str(option)
if isinstance(tag, int):
tag = self._TAG_PREFIX % tag
if not option.isdigit():
option = 'option:%s' % option
return ','.join((set_tag + tag, '%s' % option) + args)
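# e.g. (illustrative, dnsmasq >= 2.59):
#   _format_option(0, 'dns-server', '8.8.8.8')
#       -> 'tag:tag0,option:dns-server,8.8.8.8'
#   _format_option(0, 249, '10.0.0.0/24,10.0.0.1')   # numeric option code
#       -> 'tag:tag0,249,10.0.0.0/24,10.0.0.1'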
def _enable_metadata(self, subnet):
'''Determine if the metadata route will be pushed to hosts on subnet.
If subnet has a Neutron router attached, we want the hosts to get
metadata from the router's proxy via their default route instead.
'''
if self.conf.enable_isolated_metadata and subnet.ip_version == 4:
if subnet.gateway_ip is None:
return True
else:
for port in self.network.ports:
if port.device_owner == constants.DEVICE_OWNER_ROUTER_INTF:
for alloc in port.fixed_ips:
if alloc.subnet_id == subnet.id:
return False
return True
else:
return False
@classmethod
def lease_update(cls):
network_id = os.environ.get(cls.NEUTRON_NETWORK_ID_KEY)
dhcp_relay_socket = os.environ.get(cls.NEUTRON_RELAY_SOCKET_PATH_KEY)
action = sys.argv[1]
if action not in ('add', 'del', 'old'):
sys.exit()
mac_address = sys.argv[2]
ip_address = sys.argv[3]
if action == 'del':
lease_remaining = 0
else:
lease_remaining = int(os.environ.get('DNSMASQ_TIME_REMAINING', 0))
data = dict(network_id=network_id, mac_address=mac_address,
ip_address=ip_address, lease_remaining=lease_remaining)
if os.path.exists(dhcp_relay_socket):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(dhcp_relay_socket)
sock.send(jsonutils.dumps(data))
sock.close()
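# Context (dnsmasq's --dhcp-script convention, not from this file): dnsmasq
# invokes the lease-change script as '<script> <add|old|del> <mac> <ip>'
# with DNSMASQ_* details (e.g. DNSMASQ_TIME_REMAINING) in the environment,
# which is exactly what lease_update() reads from sys.argv and os.environ.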
class DeviceManager(object):
def __init__(self, conf, root_helper, plugin):
self.conf = conf
self.root_helper = root_helper
self.plugin = plugin
if not conf.interface_driver:
msg = _('An interface driver must be specified')
LOG.error(msg)
raise SystemExit(1)
try:
self.driver = importutils.import_object(
conf.interface_driver, conf)
except Exception as e:
msg = (_("Error importing interface driver '%(driver)s': "
"%(inner)s") % {'driver': conf.interface_driver,
'inner': e})
LOG.error(msg)
raise SystemExit(1)
def get_interface_name(self, network, port):
"""Return interface(device) name for use by the DHCP process."""
return self.driver.get_device_name(port)
def get_device_id(self, network):
"""Return a unique DHCP device ID for this host on the network."""
# There could be more than one dhcp server per network, so create
# a device id that combines host and network ids
return commonutils.get_dhcp_agent_device_id(network.id, self.conf.host)
def _set_default_route(self, network, device_name):
"""Sets the default gateway for this dhcp namespace.
This method is idempotent and will only adjust the route if adjusting
it would change it from what it already is. This makes it safe to call
and avoids unnecessary perturbation of the system.
"""
device = ip_lib.IPDevice(device_name,
self.root_helper,
network.namespace)
gateway = device.route.get_gateway()
if gateway:
gateway = gateway['gateway']
for subnet in network.subnets:
skip_subnet = (
subnet.ip_version != 4
or not subnet.enable_dhcp
or subnet.gateway_ip is None)
if skip_subnet:
continue
if gateway != subnet.gateway_ip:
m = _('Setting gateway for dhcp netns on net %(n)s to %(ip)s')
LOG.debug(m, {'n': network.id, 'ip': subnet.gateway_ip})
device.route.add_gateway(subnet.gateway_ip)
return
# No subnets on the network have a valid gateway. Clean it up to avoid
# confusion from seeing an invalid gateway here.
if gateway is not None:
msg = _('Removing gateway for dhcp netns on net %s')
LOG.debug(msg, network.id)
device.route.delete_gateway(gateway)
def setup_dhcp_port(self, network):
"""Create/update DHCP port for the host if needed and return port."""
device_id = self.get_device_id(network)
subnets = {}
dhcp_enabled_subnet_ids = []
for subnet in network.subnets:
if subnet.enable_dhcp:
dhcp_enabled_subnet_ids.append(subnet.id)
subnets[subnet.id] = subnet
dhcp_port = None
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == device_id:
port_fixed_ips = []
for fixed_ip in port.fixed_ips:
port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id,
'ip_address': fixed_ip.ip_address})
if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id)
# If there are dhcp_enabled_subnet_ids here that means that
# we need to add those to the port and call update.
if dhcp_enabled_subnet_ids:
port_fixed_ips.extend(
[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
dhcp_port = self.plugin.update_dhcp_port(
port.id, {'port': {'network_id': network.id,
'fixed_ips': port_fixed_ips}})
if not dhcp_port:
raise exceptions.Conflict()
else:
dhcp_port = port
# break since we found port that matches device_id
break
# check for a reserved DHCP port
if dhcp_port is None:
LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Checking for a reserved port.'),
{'device_id': device_id, 'network_id': network.id})
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
dhcp_port = self.plugin.update_dhcp_port(
port.id, {'port': {'network_id': network.id,
'device_id': device_id}})
if dhcp_port:
break
# DHCP port has not yet been created.
if dhcp_port is None:
LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist.'), {'device_id': device_id,
'network_id': network.id})
port_dict = dict(
name='',
admin_state_up=True,
device_id=device_id,
network_id=network.id,
tenant_id=network.tenant_id,
fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})
if not dhcp_port:
raise exceptions.Conflict()
# Convert subnet_id to subnet dict
fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
ip_address=fixed_ip.ip_address,
subnet=subnets[fixed_ip.subnet_id])
for fixed_ip in dhcp_port.fixed_ips]
ips = [DictModel(item) if isinstance(item, dict) else item
for item in fixed_ips]
dhcp_port.fixed_ips = ips
return dhcp_port
def setup(self, network):
"""Create and initialize a device for network's DHCP on this host."""
port = self.setup_dhcp_port(network)
interface_name = self.get_interface_name(network, port)
if ip_lib.ensure_device_is_ready(interface_name,
self.root_helper,
network.namespace):
LOG.debug(_('Reusing existing device: %s.'), interface_name)
else:
self.driver.plug(network.id,
port.id,
interface_name,
port.mac_address,
namespace=network.namespace)
ip_cidrs = []
for fixed_ip in port.fixed_ips:
subnet = fixed_ip.subnet
net = netaddr.IPNetwork(subnet.cidr)
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)
if (self.conf.enable_isolated_metadata and
self.conf.use_namespaces):
ip_cidrs.append(METADATA_DEFAULT_CIDR)
self.driver.init_l3(interface_name, ip_cidrs,
namespace=network.namespace)
# ensure that the dhcp interface is first in the list
if network.namespace is None:
device = ip_lib.IPDevice(interface_name,
self.root_helper)
device.route.pullup_route(interface_name)
if self.conf.use_namespaces:
self._set_default_route(network, interface_name)
return interface_name
def update(self, network, device_name):
"""Update device settings for the network's DHCP on this host."""
if self.conf.use_namespaces:
self._set_default_route(network, device_name)
def destroy(self, network, device_name):
"""Destroy the device used for the network's DHCP on this host."""
self.driver.unplug(device_name, namespace=network.namespace)
self.plugin.release_dhcp_port(network.id,
self.get_device_id(network))


@@ -1,383 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.config import cfg
from neutron.agent import firewall
from neutron.agent.linux import iptables_manager
from neutron.common import constants
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
SG_CHAIN = 'sg-chain'
INGRESS_DIRECTION = 'ingress'
EGRESS_DIRECTION = 'egress'
SPOOF_FILTER = 'spoof-filter'
CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i',
EGRESS_DIRECTION: 'o',
SPOOF_FILTER: 's'}
LINUX_DEV_LEN = 14
class IptablesFirewallDriver(firewall.FirewallDriver):
"""Driver which enforces security groups through iptables rules."""
IPTABLES_DIRECTION = {INGRESS_DIRECTION: 'physdev-out',
EGRESS_DIRECTION: 'physdev-in'}
def __init__(self):
self.iptables = iptables_manager.IptablesManager(
root_helper=cfg.CONF.AGENT.root_helper,
use_ipv6=True)
# ports that have a security group, keyed by device name
self.filtered_ports = {}
self._add_fallback_chain_v4v6()
self._defer_apply = False
self._pre_defer_filtered_ports = None
@property
def ports(self):
return self.filtered_ports
def prepare_port_filter(self, port):
LOG.debug(_("Preparing device (%s) filter"), port['device'])
self._remove_chains()
self.filtered_ports[port['device']] = port
# each security group has its own chains
self._setup_chains()
self.iptables.apply()
def update_port_filter(self, port):
LOG.debug(_("Updating device (%s) filter"), port['device'])
if port['device'] not in self.filtered_ports:
LOG.info(_('Attempted to update port filter which is not '
'filtered %s'), port['device'])
return
self._remove_chains()
self.filtered_ports[port['device']] = port
self._setup_chains()
self.iptables.apply()
def remove_port_filter(self, port):
LOG.debug(_("Removing device (%s) filter"), port['device'])
if not self.filtered_ports.get(port['device']):
LOG.info(_('Attempted to remove port filter which is not '
'filtered %r'), port)
return
self._remove_chains()
self.filtered_ports.pop(port['device'], None)
self._setup_chains()
self.iptables.apply()
def _setup_chains(self):
"""Setup ingress and egress chain for a port."""
if not self._defer_apply:
self._setup_chains_apply(self.filtered_ports)
def _setup_chains_apply(self, ports):
self._add_chain_by_name_v4v6(SG_CHAIN)
for port in ports.values():
self._setup_chain(port, INGRESS_DIRECTION)
self._setup_chain(port, EGRESS_DIRECTION)
self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
def _remove_chains(self):
"""Remove ingress and egress chain for a port."""
if not self._defer_apply:
self._remove_chains_apply(self.filtered_ports)
def _remove_chains_apply(self, ports):
for port in ports.values():
self._remove_chain(port, INGRESS_DIRECTION)
self._remove_chain(port, EGRESS_DIRECTION)
self._remove_chain(port, SPOOF_FILTER)
self._remove_chain_by_name_v4v6(SG_CHAIN)
def _setup_chain(self, port, direction):
self._add_chain(port, direction)
self._add_rule_by_security_group(port, direction)
def _remove_chain(self, port, direction):
chain_name = self._port_chain_name(port, direction)
self._remove_chain_by_name_v4v6(chain_name)
def _add_fallback_chain_v4v6(self):
self.iptables.ipv4['filter'].add_chain('sg-fallback')
self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
self.iptables.ipv6['filter'].add_chain('sg-fallback')
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
def _add_chain_by_name_v4v6(self, chain_name):
self.iptables.ipv6['filter'].add_chain(chain_name)
self.iptables.ipv4['filter'].add_chain(chain_name)
def _remove_chain_by_name_v4v6(self, chain_name):
self.iptables.ipv4['filter'].ensure_remove_chain(chain_name)
self.iptables.ipv6['filter'].ensure_remove_chain(chain_name)
def _add_rule_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule(chain_name, rule)
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule)
def _get_device_name(self, port):
return port['device']
def _add_chain(self, port, direction):
chain_name = self._port_chain_name(port, direction)
self._add_chain_by_name_v4v6(chain_name)
# Note(nati) jump to the security group chain (SG_CHAIN)
# This is needed because a packet may match rules in two port
# chains when both ports are on the same host
# We accept the packet at the end of SG_CHAIN.
# jump to the security group chain
device = self._get_device_name(port)
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j $%s' % (self.IPTABLES_DIRECTION[direction],
device,
SG_CHAIN)]
self._add_rule_to_chain_v4v6('FORWARD', jump_rule, jump_rule)
# jump to the chain based on the device
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j $%s' % (self.IPTABLES_DIRECTION[direction],
device,
chain_name)]
self._add_rule_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule)
if direction == EGRESS_DIRECTION:
self._add_rule_to_chain_v4v6('INPUT', jump_rule, jump_rule)
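# Illustrative jump rule for ingress on device 'tapdeadbeef-aa':
#   -m physdev --physdev-out tapdeadbeef-aa --physdev-is-bridged -j $sg-chain
# ('$' is later expanded to the per-binary wrapped chain name).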
def _split_sgr_by_ethertype(self, security_group_rules):
ipv4_sg_rules = []
ipv6_sg_rules = []
for rule in security_group_rules:
if rule.get('ethertype') == constants.IPv4:
ipv4_sg_rules.append(rule)
elif rule.get('ethertype') == constants.IPv6:
if rule.get('protocol') == 'icmp':
rule['protocol'] = 'icmpv6'
ipv6_sg_rules.append(rule)
return ipv4_sg_rules, ipv6_sg_rules
def _select_sgr_by_direction(self, port, direction):
return [rule
for rule in port.get('security_group_rules', [])
if rule['direction'] == direction]
def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules):
if mac_ip_pairs:
chain_name = self._port_chain_name(port, SPOOF_FILTER)
table.add_chain(chain_name)
for mac, ip in mac_ip_pairs:
if ip is None:
# If fixed_ips is [] this rule will be added to the end
# of the list after the allowed_address_pair rules.
table.add_rule(chain_name,
'-m mac --mac-source %s -j RETURN'
% mac)
else:
table.add_rule(chain_name,
'-m mac --mac-source %s -s %s -j RETURN'
% (mac, ip))
table.add_rule(chain_name, '-j DROP')
rules.append('-j $%s' % chain_name)
def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs,
mac_ipv6_pairs):
if netaddr.IPNetwork(ip_address).version == 4:
mac_ipv4_pairs.append((mac, ip_address))
else:
mac_ipv6_pairs.append((mac, ip_address))
def _spoofing_rule(self, port, ipv4_rules, ipv6_rules):
# NOTE(nati) allow DHCP client and IPv6 RA packets
ipv4_rules += ['-p udp -m udp --sport 68 --dport 67 -j RETURN']
ipv6_rules += ['-p icmpv6 -j RETURN']
ipv6_rules += ['-p udp -m udp --sport 546 --dport 547 -j RETURN']
mac_ipv4_pairs = []
mac_ipv6_pairs = []
if isinstance(port.get('allowed_address_pairs'), list):
for address_pair in port['allowed_address_pairs']:
self._build_ipv4v6_mac_ip_list(address_pair['mac_address'],
address_pair['ip_address'],
mac_ipv4_pairs,
mac_ipv6_pairs)
for ip in port['fixed_ips']:
self._build_ipv4v6_mac_ip_list(port['mac_address'], ip,
mac_ipv4_pairs, mac_ipv6_pairs)
if not port['fixed_ips']:
mac_ipv4_pairs.append((port['mac_address'], None))
mac_ipv6_pairs.append((port['mac_address'], None))
self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'],
mac_ipv4_pairs, ipv4_rules)
self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'],
mac_ipv6_pairs, ipv6_rules)
def _drop_dhcp_rule(self, ipv4_rules, ipv6_rules):
# NOTE(nati) Drop DHCP server traffic originating from the VM
ipv4_rules += ['-p udp -m udp --sport 67 --dport 68 -j DROP']
ipv6_rules += ['-p udp -m udp --sport 547 --dport 546 -j DROP']
def _accept_inbound_icmpv6(self):
# Allow multicast listener, neighbor solicitation and
# neighbor advertisement into the instance
icmpv6_rules = []
for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
icmpv6_rules += ['-p icmpv6 --icmpv6-type %s -j RETURN' %
icmp6_type]
return icmpv6_rules
def _add_rule_by_security_group(self, port, direction):
chain_name = self._port_chain_name(port, direction)
# select rules for current direction
security_group_rules = self._select_sgr_by_direction(port, direction)
# split groups by ip version
# for ipv4, iptables command is used
# for ipv6, iptables6 command is used
ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype(
security_group_rules)
ipv4_iptables_rule = []
ipv6_iptables_rule = []
if direction == EGRESS_DIRECTION:
self._spoofing_rule(port,
ipv4_iptables_rule,
ipv6_iptables_rule)
self._drop_dhcp_rule(ipv4_iptables_rule, ipv6_iptables_rule)
if direction == INGRESS_DIRECTION:
ipv6_iptables_rule += self._accept_inbound_icmpv6()
ipv4_iptables_rule += self._convert_sgr_to_iptables_rules(
ipv4_sg_rules)
ipv6_iptables_rule += self._convert_sgr_to_iptables_rules(
ipv6_sg_rules)
self._add_rule_to_chain_v4v6(chain_name,
ipv4_iptables_rule,
ipv6_iptables_rule)
def _convert_sgr_to_iptables_rules(self, security_group_rules):
iptables_rules = []
self._drop_invalid_packets(iptables_rules)
self._allow_established(iptables_rules)
for rule in security_group_rules:
# These arguments MUST be in the format iptables-save will
# display them: source/dest, protocol, sport, dport, target
# Otherwise the iptables_manager code won't be able to find
# them to preserve their [packet:byte] counts.
args = self._ip_prefix_arg('s',
rule.get('source_ip_prefix'))
args += self._ip_prefix_arg('d',
rule.get('dest_ip_prefix'))
args += self._protocol_arg(rule.get('protocol'))
args += self._port_arg('sport',
rule.get('protocol'),
rule.get('source_port_range_min'),
rule.get('source_port_range_max'))
args += self._port_arg('dport',
rule.get('protocol'),
rule.get('port_range_min'),
rule.get('port_range_max'))
args += ['-j RETURN']
iptables_rules += [' '.join(args)]
iptables_rules += ['-j $sg-fallback']
return iptables_rules
def _drop_invalid_packets(self, iptables_rules):
# Always drop invalid packets
iptables_rules += ['-m state --state INVALID -j DROP']
return iptables_rules
def _allow_established(self, iptables_rules):
# Allow established connections
iptables_rules += ['-m state --state RELATED,ESTABLISHED -j RETURN']
return iptables_rules
def _protocol_arg(self, protocol):
if not protocol:
return []
iptables_rule = ['-p', protocol]
# iptables always adds '-m protocol' for udp and tcp
if protocol in ['udp', 'tcp']:
iptables_rule += ['-m', protocol]
return iptables_rule
def _port_arg(self, direction, protocol, port_range_min, port_range_max):
if (protocol not in ['udp', 'tcp', 'icmp', 'icmpv6']
or not port_range_min):
return []
if protocol in ['icmp', 'icmpv6']:
# Note(xuhanp): port_range_min/port_range_max represent
# icmp type/code when protocol is icmp or icmpv6
# icmp code can be 0 so we cannot use "if port_range_max" here
if port_range_max is not None:
return ['--%s-type' % protocol,
'%s/%s' % (port_range_min, port_range_max)]
return ['--%s-type' % protocol, '%s' % port_range_min]
elif port_range_min == port_range_max:
return ['--%s' % direction, '%s' % (port_range_min,)]
else:
return ['-m', 'multiport',
'--%ss' % direction,
'%s:%s' % (port_range_min, port_range_max)]
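# e.g. (illustrative):
#   _port_arg('dport', 'tcp', 80, 80)      -> ['--dport', '80']
#   _port_arg('dport', 'tcp', 8000, 8080)  -> ['-m', 'multiport',
#                                              '--dports', '8000:8080']
#   _port_arg('dport', 'icmp', 8, 0)       -> ['--icmp-type', '8/0']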
def _ip_prefix_arg(self, direction, ip_prefix):
# NOTE(nati): source_group_id is converted to a list of
# source_ip_prefix values on the server side
if ip_prefix:
return ['-%s' % direction, ip_prefix]
return []
def _port_chain_name(self, port, direction):
return iptables_manager.get_chain_name(
'%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:]))
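# e.g. for device 'tapdeadbeef-aa', ingress ('i') direction this yields
# get_chain_name('ideadbeef-aa') == 'ideadbeef-a' (11-char wrap limit).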
def filter_defer_apply_on(self):
if not self._defer_apply:
self.iptables.defer_apply_on()
self._pre_defer_filtered_ports = dict(self.filtered_ports)
self._defer_apply = True
def filter_defer_apply_off(self):
if self._defer_apply:
self._defer_apply = False
self._remove_chains_apply(self._pre_defer_filtered_ports)
self._pre_defer_filtered_ports = None
self._setup_chains_apply(self.filtered_ports)
self.iptables.defer_apply_off()
class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
OVS_HYBRID_TAP_PREFIX = 'tap'
def _port_chain_name(self, port, direction):
return iptables_manager.get_chain_name(
'%s%s' % (CHAIN_NAME_PREFIX[direction], port['device']))
def _get_device_name(self, port):
return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN]


@@ -1,668 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Juliano Martinez, Locaweb.
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import inspect
import os
import re
from neutron.agent.linux import utils as linux_utils
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])[:16]
binary_name = get_binary_name()
# The length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
# Number of iptables rules to print before and after a rule that causes a
# a failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5
def get_chain_name(chain_name, wrap=True):
if wrap:
return chain_name[:MAX_CHAIN_LEN_WRAP]
else:
return chain_name[:MAX_CHAIN_LEN_NOWRAP]
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False,
binary_name=binary_name, tag=None):
self.chain = get_chain_name(chain, wrap)
self.rule = rule
self.wrap = wrap
self.top = top
self.wrap_name = binary_name[:16]
self.tag = tag
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.wrap_name, self.chain)
else:
chain = self.chain
return '-A %s %s' % (chain, self.rule)
class IptablesTable(object):
"""An iptables table."""
def __init__(self, binary_name=binary_name):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.wrap_name = binary_name[:16]
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
name = get_chain_name(name, wrap)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def ensure_remove_chain(self, name, wrap=True):
"""Ensure the chain is removed.
This removal "cascades". All rules in the chain are removed, as are
all rules in other chains that jump to it.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
return
self.remove_chain(name, wrap)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rules in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.remove_chains.add(name)
# first, add rules to remove that have a matching chain name
self.remove_rules += [r for r in self.rules if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.remove_rules += [r for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False, tag=None):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
tag))
def _wrap_target_chain(self, s, wrap):
if s.startswith('$'):
s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
chain = get_chain_name(chain, wrap)
try:
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.remove(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chain = get_chain_name(chain, wrap)
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
def clear_rules_by_tag(self, tag):
if not tag:
return
rules = [rule for rule in self.rules if rule.tag == tag]
for rule in rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from neutron-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, _execute=None, state_less=False,
root_helper=None, use_ipv6=False, namespace=None,
binary_name=binary_name):
if _execute:
self.execute = _execute
else:
self.execute = linux_utils.execute
self.use_ipv6 = use_ipv6
self.root_helper = root_helper
self.namespace = namespace
self.iptables_apply_deferred = False
self.wrap_name = binary_name[:16]
self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
# Add a neutron-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('neutron-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('neutron-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
if not state_less:
self.ipv4.update(
{'nat': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'nat': ['PREROUTING',
'OUTPUT', 'POSTROUTING']})
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' %
(chain), wrap=False)
if not state_less:
# Add a neutron-postrouting-bottom chain. It's intended to be
# shared among the various nova components and is jumped to at
# the end of the POSTROUTING chain.
self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING',
'-j neutron-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared neutron-postrouting-bottom
# chain so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
'-j $snat', wrap=False)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
self.iptables_apply_deferred = False
self._apply()
def apply(self):
if self.iptables_apply_deferred:
return
self._apply()
def _apply(self):
lock_name = 'iptables'
if self.namespace:
lock_name += '-' + self.namespace
try:
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
LOG.debug(_('Got semaphore / lock "%s"'), lock_name)
return self._apply_synchronized()
finally:
LOG.debug(_('Semaphore / lock released "%s"'), lock_name)
def _apply_synchronized(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if self.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
args = ['%s-save' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
all_tables = self.execute(args, root_helper=self.root_helper)
all_lines = all_tables.split('\n')
for table_name, table in tables.iteritems():
start, end = self._find_table(all_lines, table_name)
all_lines[start:end] = self._modify_rules(
all_lines[start:end], table, table_name)
args = ['%s-restore' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
try:
self.execute(args, process_input='\n'.join(all_lines),
root_helper=self.root_helper)
except RuntimeError as r_error:
with excutils.save_and_reraise_exception():
try:
line_no = int(re.search(
'iptables-restore: line ([0-9]+?) failed',
str(r_error)).group(1))
context = IPTABLES_ERROR_LINES_OF_CONTEXT
log_start = max(0, line_no - context)
log_end = line_no + context
except AttributeError:
# line error wasn't found, print all lines instead
log_start = 0
log_end = len(all_lines)
log_lines = ('%7d. %s' % (idx, l)
for idx, l in enumerate(
all_lines[log_start:log_end],
log_start + 1)
)
LOG.error(_("IPTablesManager.apply failed to apply the "
"following set of iptables rules:\n%s"),
'\n'.join(log_lines))
LOG.debug(_("IPTablesManager.apply completed with success"))
def _find_table(self, lines, table_name):
if len(lines) < 3:
# fewer than 3 lines only happens with a fake/empty iptables dump
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
LOG.debug(_('Unable to find table %s'), table_name)
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
def _find_rules_index(self, lines):
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(lines):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
return rules_index
def _find_last_entry(self, filter_list, match_str):
# find a matching entry, starting from the bottom
for s in reversed(filter_list):
s = s.strip()
if match_str in s:
return s
def _modify_rules(self, current_lines, table, table_name):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['# Generated by iptables_manager',
'*' + table_name, 'COMMIT',
'# Completed by iptables_manager']
current_lines = fake_table
# Fill old_filter with any chains or rules we might have added,
# they could have a [packet:byte] count we want to preserve.
# Fill new_filter with any chains or rules without our name in them.
old_filter, new_filter = [], []
for line in current_lines:
(old_filter if self.wrap_name in line else
new_filter).append(line.strip())
rules_index = self._find_rules_index(new_filter)
all_chains = [':%s' % name for name in unwrapped_chains]
all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
# Iterate through all the chains, trying to find an existing
# match.
our_chains = []
for chain in all_chains:
chain_str = str(chain).strip()
old = self._find_last_entry(old_filter, chain_str)
if not old:
dup = self._find_last_entry(new_filter, chain_str)
new_filter = [s for s in new_filter if chain_str not in s.strip()]
# if no old or duplicates, use original chain
if old or dup:
chain_str = str(old or dup)
else:
# add-on the [packet:bytes]
chain_str += ' - [0:0]'
our_chains += [chain_str]
# Iterate through all the rules, trying to find an existing
# match.
our_rules = []
bot_rules = []
for rule in rules:
rule_str = str(rule).strip()
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
old = self._find_last_entry(old_filter, rule_str)
if not old:
dup = self._find_last_entry(new_filter, rule_str)
new_filter = [s for s in new_filter if rule_str not in s.strip()]
# if no old or duplicates, use original rule
if old or dup:
rule_str = str(old or dup)
# back up one index so we write the array correctly
if not old:
rules_index -= 1
else:
# add-on the [packet:bytes]
rule_str = '[0:0] ' + rule_str
if rule.top:
# rule.top == True means we want this rule to be at the top.
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = our_chains
def _strip_packets_bytes(line):
# strip any [packet:byte] counts at start or end of lines
if line.startswith(':'):
# it's a chain, for example, ":neutron-billing - [0:0]"
line = line.split(':')[1]
line = line.split(' - [', 1)[0]
elif line.startswith('['):
# it's a rule, for example, "[0:0] -A neutron-billing..."
line = line.split('] ', 1)[1]
line = line.strip()
return line
seen_chains = set()
def _weed_out_duplicate_chains(line):
# ignore [packet:byte] counts at end of lines
if line.startswith(':'):
line = _strip_packets_bytes(line)
if line in seen_chains:
return False
else:
seen_chains.add(line)
# Leave it alone
return True
seen_rules = set()
def _weed_out_duplicate_rules(line):
if line.startswith('['):
line = _strip_packets_bytes(line)
if line in seen_rules:
return False
else:
seen_rules.add(line)
# Leave it alone
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
line = _strip_packets_bytes(line)
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
line = _strip_packets_bytes(line)
for rule in remove_rules:
rule_str = _strip_packets_bytes(str(rule))
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates. Go through the chains and rules, letting
# the *last* occurrence take precedence since it could have a
# non-zero [packet:byte] count we want to preserve. We also filter
# out anything in the "remove" list.
new_filter.reverse()
new_filter = [line for line in new_filter
if _weed_out_duplicate_chains(line) and
_weed_out_duplicate_rules(line) and
_weed_out_removes(line)]
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
del remove_rules[:]
return new_filter
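# Illustrative: with duplicate rule lines
#   ['[0:0] -A c -j X', '[5:60] -A c -j X']
# the reverse/weed/reverse pass above keeps only '[5:60] -A c -j X',
# i.e. the last occurrence, preserving its nonzero packet:byte counts.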
def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
name = get_chain_name(chain, wrap)
cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
if name in table._select_chain_set(wrap)]
cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items()
if name in table._select_chain_set(wrap)]
return cmd_tables
def get_traffic_counters(self, chain, wrap=True, zero=False):
"""Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables:
LOG.warn(_('Attempted to get traffic counters of chain %s which '
'does not exist'), chain)
return
name = get_chain_name(chain, wrap)
acc = {'pkts': 0, 'bytes': 0}
for cmd, table in cmd_tables:
args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
if zero:
args.append('-Z')
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
current_table = (self.execute(args,
root_helper=self.root_helper))
current_lines = current_table.split('\n')
for line in current_lines[2:]:
if not line:
break
data = line.split()
if (len(data) < 2 or
not data[0].isdigit() or
not data[1].isdigit()):
break
acc['pkts'] += int(data[0])
acc['bytes'] += int(data[1])
return acc


@@ -1,564 +0,0 @@
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.common import utils as common_utils
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as p_const
# TODO(JLH) Should we remove the explicit include of the ovs plugin here
from neutron.plugins.openvswitch.common import constants
# Default timeout for ovs-vsctl command
DEFAULT_OVS_VSCTL_TIMEOUT = 10
OPTS = [
cfg.IntOpt('ovs_vsctl_timeout',
default=DEFAULT_OVS_VSCTL_TIMEOUT,
help=_('Timeout in seconds for ovs-vsctl commands')),
]
cfg.CONF.register_opts(OPTS)
LOG = logging.getLogger(__name__)
class VifPort:
def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
self.port_name = port_name
self.ofport = ofport
self.vif_id = vif_id
self.vif_mac = vif_mac
self.switch = switch
def __str__(self):
return ("iface-id=" + self.vif_id + ", vif_mac=" +
self.vif_mac + ", port_name=" + self.port_name +
", ofport=" + str(self.ofport) + ", bridge_name=" +
self.switch.br_name)
class BaseOVS(object):
def __init__(self, root_helper):
self.root_helper = root_helper
self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout
def run_vsctl(self, args, check_error=False):
full_args = ["ovs-vsctl", "--timeout=%d" % self.vsctl_timeout] + args
try:
return utils.execute(full_args, root_helper=self.root_helper)
except Exception as e:
with excutils.save_and_reraise_exception() as ctxt:
LOG.error(_("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
if not check_error:
ctxt.reraise = False
def add_bridge(self, bridge_name):
self.run_vsctl(["--", "--may-exist", "add-br", bridge_name])
return OVSBridge(bridge_name, self.root_helper)
def delete_bridge(self, bridge_name):
self.run_vsctl(["--", "--if-exists", "del-br", bridge_name])
def bridge_exists(self, bridge_name):
try:
self.run_vsctl(['br-exists', bridge_name], check_error=True)
except RuntimeError as e:
with excutils.save_and_reraise_exception() as ctxt:
if 'Exit code: 2\n' in str(e):
ctxt.reraise = False
return False
return True
def get_bridge_name_for_port_name(self, port_name):
try:
return self.run_vsctl(['port-to-br', port_name], check_error=True)
except RuntimeError as e:
with excutils.save_and_reraise_exception() as ctxt:
if 'Exit code: 1\n' in str(e):
ctxt.reraise = False
def port_exists(self, port_name):
return bool(self.get_bridge_name_for_port_name(port_name))
class OVSBridge(BaseOVS):
def __init__(self, br_name, root_helper):
super(OVSBridge, self).__init__(root_helper)
self.br_name = br_name
self.defer_apply_flows = False
self.deferred_flows = {'add': '', 'mod': '', 'del': ''}
def set_controller(self, controller_names):
vsctl_command = ['--', 'set-controller', self.br_name]
vsctl_command.extend(controller_names)
self.run_vsctl(vsctl_command, check_error=True)
def del_controller(self):
self.run_vsctl(['--', 'del-controller', self.br_name],
check_error=True)
def get_controller(self):
res = self.run_vsctl(['--', 'get-controller', self.br_name],
check_error=True)
if res:
return res.strip().split('\n')
return res
def set_secure_mode(self):
self.run_vsctl(['--', 'set-fail-mode', self.br_name, 'secure'],
check_error=True)
def set_protocols(self, protocols):
self.run_vsctl(['--', 'set', 'bridge', self.br_name,
"protocols=%s" % protocols],
check_error=True)
def create(self):
self.add_bridge(self.br_name)
def destroy(self):
self.delete_bridge(self.br_name)
def reset_bridge(self):
self.destroy()
self.create()
def add_port(self, port_name):
self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
port_name])
return self.get_port_ofport(port_name)
def delete_port(self, port_name):
self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
port_name])
def set_db_attribute(self, table_name, record, column, value):
args = ["set", table_name, record, "%s=%s" % (column, value)]
self.run_vsctl(args)
def clear_db_attribute(self, table_name, record, column):
args = ["clear", table_name, record, column]
self.run_vsctl(args)
def run_ofctl(self, cmd, args, process_input=None):
full_args = ["ovs-ofctl", cmd, self.br_name] + args
try:
return utils.execute(full_args, root_helper=self.root_helper,
process_input=process_input)
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
def count_flows(self):
flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
return len(flow_list) - 1
def remove_all_flows(self):
self.run_ofctl("del-flows", [])
def get_port_ofport(self, port_name):
ofport = self.db_get_val("Interface", port_name, "ofport")
# This can return a non-integer string, like '[]', so normalize such
# values to a common failure case
try:
int(ofport)
return ofport
except ValueError:
return constants.INVALID_OFPORT
def get_datapath_id(self):
return self.db_get_val('Bridge',
self.br_name, 'datapath_id').strip('"')
def add_flow(self, **kwargs):
flow_str = _build_flow_expr_str(kwargs, 'add')
if self.defer_apply_flows:
self.deferred_flows['add'] += flow_str + '\n'
else:
self.run_ofctl("add-flow", [flow_str])
def mod_flow(self, **kwargs):
flow_str = _build_flow_expr_str(kwargs, 'mod')
if self.defer_apply_flows:
self.deferred_flows['mod'] += flow_str + '\n'
else:
self.run_ofctl("mod-flows", [flow_str])
def delete_flows(self, **kwargs):
flow_expr_str = _build_flow_expr_str(kwargs, 'del')
if self.defer_apply_flows:
self.deferred_flows['del'] += flow_expr_str + '\n'
else:
self.run_ofctl("del-flows", [flow_expr_str])
def dump_flows_for_table(self, table):
retval = None
flow_str = "table=%s" % table
flows = self.run_ofctl("dump-flows", [flow_str])
if flows:
retval = '\n'.join(item for item in flows.splitlines()
if 'NXST' not in item)
return retval
def defer_apply_on(self):
LOG.debug(_('defer_apply_on'))
self.defer_apply_flows = True
def defer_apply_off(self):
LOG.debug(_('defer_apply_off'))
# Note(ethuleau): stash flows and disable deferred mode. Then apply
# flows from the stashed reference, so that flows added between two
# ofctl commands are not purged.
stashed_deferred_flows, self.deferred_flows = (
self.deferred_flows, {'add': '', 'mod': '', 'del': ''}
)
self.defer_apply_flows = False
for action, flows in stashed_deferred_flows.items():
if flows:
LOG.debug(_('Applying following deferred flows '
'to bridge %s'), self.br_name)
for line in flows.splitlines():
LOG.debug(_('%(action)s: %(flow)s'),
{'action': action, 'flow': line})
self.run_ofctl('%s-flows' % action, ['-'], flows)
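# Usage sketch (assumed workflow): deferred mode batches flow changes so
# each action is pushed through a single ovs-ofctl invocation:
#
#     br.defer_apply_on()
#     br.add_flow(table=0, priority=1, actions='normal')
#     br.delete_flows(in_port=5)
#     br.defer_apply_off()  # flushes the stashed 'add' and 'del' batches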
def add_tunnel_port(self, port_name, remote_ip, local_ip,
tunnel_type=p_const.TYPE_GRE,
vxlan_udp_port=constants.VXLAN_UDP_PORT,
dont_fragment=True):
vsctl_command = ["--", "--may-exist", "add-port", self.br_name,
port_name]
vsctl_command.extend(["--", "set", "Interface", port_name,
"type=%s" % tunnel_type])
if tunnel_type == p_const.TYPE_VXLAN:
# Only set the VXLAN UDP port if it's not the default
if vxlan_udp_port != constants.VXLAN_UDP_PORT:
vsctl_command.append("options:dst_port=%s" % vxlan_udp_port)
vsctl_command.append(("options:df_default=%s" %
bool(dont_fragment)).lower())
vsctl_command.extend(["options:remote_ip=%s" % remote_ip,
"options:local_ip=%s" % local_ip,
"options:in_key=flow",
"options:out_key=flow"])
self.run_vsctl(vsctl_command)
ofport = self.get_port_ofport(port_name)
if (tunnel_type == p_const.TYPE_VXLAN and
ofport == constants.INVALID_OFPORT):
LOG.error(_('Unable to create VXLAN tunnel port. Please ensure '
'that an openvswitch version that supports VXLAN is '
'installed.'))
return ofport
def add_patch_port(self, local_name, remote_name):
self.run_vsctl(["add-port", self.br_name, local_name,
"--", "set", "Interface", local_name,
"type=patch", "options:peer=%s" % remote_name])
return self.get_port_ofport(local_name)
def db_get_map(self, table, record, column, check_error=False):
output = self.run_vsctl(["get", table, record, column], check_error)
if output:
output_str = output.rstrip("\n\r")
return self.db_str_to_map(output_str)
return {}
def db_get_val(self, table, record, column, check_error=False):
output = self.run_vsctl(["get", table, record, column], check_error)
if output:
return output.rstrip("\n\r")
def db_str_to_map(self, full_str):
elements = full_str.strip("{}").split(", ")
ret = {}
for e in elements:
if e.find("=") == -1:
continue
arr = e.split("=")
ret[arr[0]] = arr[1].strip("\"")
return ret
def get_port_name_list(self):
res = self.run_vsctl(["list-ports", self.br_name], check_error=True)
if res:
return res.strip().split("\n")
return []
def get_port_stats(self, port_name):
return self.db_get_map("Interface", port_name, "statistics")
def get_xapi_iface_id(self, xs_vif_uuid):
args = ["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
try:
return utils.execute(args, root_helper=self.root_helper).strip()
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': args, 'exception': e})
# returns a VIF object for each VIF port
def get_vif_ports(self):
edge_ports = []
port_names = self.get_port_name_list()
for name in port_names:
external_ids = self.db_get_map("Interface", name, "external_ids",
check_error=True)
ofport = self.db_get_val("Interface", name, "ofport",
check_error=True)
if "iface-id" in external_ids and "attached-mac" in external_ids:
p = VifPort(name, ofport, external_ids["iface-id"],
external_ids["attached-mac"], self)
edge_ports.append(p)
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not automatically
# synced to OVS from XAPI, we grab it from XAPI directly
iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
p = VifPort(name, ofport, iface_id,
external_ids["attached-mac"], self)
edge_ports.append(p)
return edge_ports
def get_vif_port_set(self):
port_names = self.get_port_name_list()
edge_ports = set()
args = ['--format=json', '--', '--columns=name,external_ids,ofport',
'list', 'Interface']
result = self.run_vsctl(args, check_error=True)
if not result:
return edge_ports
for row in jsonutils.loads(result)['data']:
name = row[0]
if name not in port_names:
continue
external_ids = dict(row[1][1])
# Do not consider VIFs which aren't yet ready
# This can happen when ofport values are either [] or ["set", []]
# We will therefore consider only integer values for ofport
ofport = row[2]
try:
int_ofport = int(ofport)
except (ValueError, TypeError):
LOG.warn(_("Found not yet ready openvswitch port: %s"), row)
else:
if int_ofport > 0:
if ("iface-id" in external_ids and
"attached-mac" in external_ids):
edge_ports.add(external_ids['iface-id'])
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not
# automatically synced to OVS from XAPI, we grab it
# from XAPI directly
iface_id = self.get_xapi_iface_id(
external_ids["xs-vif-uuid"])
edge_ports.add(iface_id)
else:
LOG.warn(_("Found failed openvswitch port: %s"), row)
return edge_ports
def get_port_tag_dict(self):
"""Get a dict of port names and associated vlan tags.
e.g. the returned dict is of the following form::
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
The TAG ID is only available in the "Port" table and is not available
in the "Interface" table queried by the get_vif_port_set() method.
"""
port_names = self.get_port_name_list()
args = ['--format=json', '--', '--columns=name,tag', 'list', 'Port']
result = self.run_vsctl(args, check_error=True)
port_tag_dict = {}
if not result:
return port_tag_dict
for name, tag in jsonutils.loads(result)['data']:
if name not in port_names:
continue
# 'tag' can be [u'set', []] or an integer
if isinstance(tag, list):
tag = tag[1]
port_tag_dict[name] = tag
return port_tag_dict
def get_vif_port_by_id(self, port_id):
args = ['--format=json', '--', '--columns=external_ids,name,ofport',
'find', 'Interface',
'external_ids:iface-id="%s"' % port_id]
result = self.run_vsctl(args)
if not result:
return
json_result = jsonutils.loads(result)
try:
# Retrieve the indexes of the columns we're looking for
headings = json_result['headings']
ext_ids_idx = headings.index('external_ids')
name_idx = headings.index('name')
ofport_idx = headings.index('ofport')
# If the data attribute is missing or empty, the line below will
# raise an exception which will be captured in this block.
# We won't deal with the possibility of ovs-vsctl returning multiple
# rows, since the interface identifier is unique.
data = json_result['data'][0]
port_name = data[name_idx]
switch = get_bridge_for_iface(self.root_helper, port_name)
if switch != self.br_name:
LOG.info(_("Port: %(port_name)s is on %(switch)s,"
" not on %(br_name)s"), {'port_name': port_name,
'switch': switch,
'br_name': self.br_name})
return
ofport = data[ofport_idx]
# ofport must be integer otherwise return None
if not isinstance(ofport, int) or ofport == -1:
LOG.warn(_("ofport: %(ofport)s for VIF: %(vif)s is not a "
"positive integer"), {'ofport': ofport,
'vif': port_id})
return
# Find VIF's mac address in external ids
ext_id_dict = dict((item[0], item[1]) for item in
data[ext_ids_idx][1])
vif_mac = ext_id_dict['attached-mac']
return VifPort(port_name, ofport, port_id, vif_mac, self)
except Exception as e:
LOG.warn(_("Unable to parse interface details. Exception: %s"), e)
return
def delete_ports(self, all_ports=False):
if all_ports:
port_names = self.get_port_name_list()
else:
port_names = (port.port_name for port in self.get_vif_ports())
for port_name in port_names:
self.delete_port(port_name)
def get_local_port_mac(self):
"""Retrieve the mac of the bridge's local port."""
address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address
if address:
return address
else:
msg = _('Unable to determine mac address for %s') % self.br_name
raise Exception(msg)
def __enter__(self):
self.create()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.destroy()
def get_bridge_for_iface(root_helper, iface):
args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
"iface-to-br", iface]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Interface %s not found."), iface)
return None
def get_bridges(root_helper):
args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
"list-br"]
try:
return utils.execute(args, root_helper=root_helper).strip().split("\n")
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e)
def get_bridge_external_bridge_id(root_helper, bridge):
args = ["ovs-vsctl", "--timeout=2", "br-get-external-id",
bridge, "bridge-id"]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Bridge %s not found."), bridge)
return None
def _build_flow_expr_str(flow_dict, cmd):
flow_expr_arr = []
actions = None
if cmd == 'add':
flow_expr_arr.append("hard_timeout=%s" %
flow_dict.pop('hard_timeout', '0'))
flow_expr_arr.append("idle_timeout=%s" %
flow_dict.pop('idle_timeout', '0'))
flow_expr_arr.append("priority=%s" %
flow_dict.pop('priority', '1'))
elif 'priority' in flow_dict:
msg = _("Cannot match priority on flow deletion or modification")
raise exceptions.InvalidInput(error_message=msg)
if cmd != 'del':
if "actions" not in flow_dict:
msg = _("Must specify one or more actions on flow addition"
" or modification")
raise exceptions.InvalidInput(error_message=msg)
actions = "actions=%s" % flow_dict.pop('actions')
for key, value in flow_dict.iteritems():
if key == 'proto':
flow_expr_arr.append(value)
else:
flow_expr_arr.append("%s=%s" % (key, str(value)))
if actions:
flow_expr_arr.append(actions)
return ','.join(flow_expr_arr)
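# Example (illustrative input/output) of the expression builder above:
#
#     _build_flow_expr_str({'priority': 2, 'in_port': 1,
#                           'actions': 'drop'}, 'add')
#     # -> 'hard_timeout=0,idle_timeout=0,priority=2,in_port=1,actions=drop'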
def ofctl_arg_supported(root_helper, cmd, args):
'''Verify if ovs-ofctl binary supports command with specific args.
:param root_helper: utility to use when running shell cmds.
:param cmd: ovs-ofctl command to use for the test.
:param args: arguments to test with the command.
:returns: a boolean indicating whether the args are supported.
'''
supported = True
br_name = 'br-test-%s' % common_utils.get_random_string(6)
test_br = OVSBridge(br_name, root_helper)
test_br.reset_bridge()
full_args = ["ovs-ofctl", cmd, test_br.br_name] + args
try:
utils.execute(full_args, root_helper=root_helper)
except Exception:
supported = False
test_br.destroy()
return supported
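# Usage sketch (the probed argument is only an example): feature-detect
# ovs-ofctl support on a throw-away bridge before relying on it:
#
#     if ofctl_arg_supported(root_helper, 'add-flow',
#                            ['hard_timeout=0,actions=drop']):
#         pass  # safe to use the argument in real flows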


@ -1,107 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from neutron.agent.linux import async_process
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class OvsdbMonitor(async_process.AsyncProcess):
"""Manages an invocation of 'ovsdb-client monitor'."""
def __init__(self, table_name, columns=None, format=None,
root_helper=None, respawn_interval=None):
cmd = ['ovsdb-client', 'monitor', table_name]
if columns:
cmd.append(','.join(columns))
if format:
cmd.append('--format=%s' % format)
super(OvsdbMonitor, self).__init__(cmd,
root_helper=root_helper,
respawn_interval=respawn_interval)
def _read_stdout(self):
data = self._process.stdout.readline()
if not data:
return
self._stdout_lines.put(data)
LOG.debug(_('Output received from ovsdb monitor: %s') % data)
return data
def _read_stderr(self):
data = super(OvsdbMonitor, self)._read_stderr()
if data:
LOG.error(_('Error received from ovsdb monitor: %s') % data)
# Do not return value to ensure that stderr output will
# stop the monitor.
class SimpleInterfaceMonitor(OvsdbMonitor):
"""Monitors the Interface table of the local host's ovsdb for changes.
The has_updates property indicates whether changes to the ovsdb
Interface table have been detected since the monitor started or
since the previous access.
"""
def __init__(self, root_helper=None, respawn_interval=None):
super(SimpleInterfaceMonitor, self).__init__(
'Interface',
columns=['name', 'ofport'],
format='json',
root_helper=root_helper,
respawn_interval=respawn_interval,
)
self.data_received = False
@property
def is_active(self):
return (self.data_received and
self._kill_event and
not self._kill_event.ready())
@property
def has_updates(self):
"""Indicate whether the ovsdb Interface table has been updated.
True will be returned if the monitor process is not active.
This 'failing open' minimizes the risk of falsely indicating
the absence of updates at the expense of potential false
positives.
"""
return bool(list(self.iter_stdout())) or not self.is_active
def start(self, block=False, timeout=5):
super(SimpleInterfaceMonitor, self).start()
if block:
eventlet.timeout.Timeout(timeout)
while not self.is_active:
eventlet.sleep()
def _kill(self, *args, **kwargs):
self.data_received = False
super(SimpleInterfaceMonitor, self)._kill(*args, **kwargs)
def _read_stdout(self):
data = super(SimpleInterfaceMonitor, self)._read_stdout()
if data and not self.data_received:
self.data_received = True
return data
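# Usage sketch (assumed polling loop): lets an agent notice OVS port
# changes without running ovs-vsctl on every iteration; scan_ports and
# poll_interval are hypothetical names:
#
#     monitor = SimpleInterfaceMonitor(root_helper=root_helper)
#     monitor.start(block=True)
#     while True:
#         if monitor.has_updates:
#             scan_ports()  # hypothetical resync hook
#         eventlet.sleep(poll_interval)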


@ -1,17 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost


@ -1,392 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import hashlib
import hmac
import os
import socket
import sys
import eventlet
eventlet.monkey_patch()
import httplib2
from neutronclient.v2_0 import client
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob
from neutron.agent.common import config as agent_conf
from neutron.agent import rpc as agent_rpc
from neutron.common import config
from neutron.common import constants as n_const
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron.openstack.common.cache import cache
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import service
from neutron import wsgi
LOG = logging.getLogger(__name__)
class MetadataProxyHandler(object):
OPTS = [
cfg.StrOpt('admin_user',
help=_("Admin user")),
cfg.StrOpt('admin_password',
help=_("Admin password"),
secret=True),
cfg.StrOpt('admin_tenant_name',
help=_("Admin tenant name")),
cfg.StrOpt('auth_url',
help=_("Authentication URL")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('auth_region',
help=_("Authentication region")),
cfg.BoolOpt('auth_insecure',
default=False,
help=_("Turn off verification of the certificate for"
" ssl")),
cfg.StrOpt('auth_ca_cert',
help=_("Certificate Authority public key (CA cert) "
"file for ssl")),
cfg.StrOpt('endpoint_type',
default='adminURL',
help=_("Network service endpoint type to pull from "
"the keystone catalog")),
cfg.StrOpt('nova_metadata_ip', default='127.0.0.1',
help=_("IP address used by Nova metadata server.")),
cfg.IntOpt('nova_metadata_port',
default=8775,
help=_("TCP Port used by Nova metadata server.")),
cfg.StrOpt('metadata_proxy_shared_secret',
default='',
help=_('Shared secret to sign instance-id request'),
secret=True),
cfg.StrOpt('nova_metadata_protocol',
default='http',
choices=['http', 'https'],
help=_("Protocol to access nova metadata, http or https")),
cfg.BoolOpt('nova_metadata_insecure', default=False,
help=_("Allow to perform insecure SSL (https) requests to "
"nova metadata")),
cfg.StrOpt('nova_client_cert',
default='',
help=_("Client certificate for nova metadata api server.")),
cfg.StrOpt('nova_client_priv_key',
default='',
help=_("Private key of client certificate."))
]
def __init__(self, conf):
self.conf = conf
self.auth_info = {}
if self.conf.cache_url:
self._cache = cache.get_cache(self.conf.cache_url)
else:
self._cache = False
def _get_neutron_client(self):
qclient = client.Client(
username=self.conf.admin_user,
password=self.conf.admin_password,
tenant_name=self.conf.admin_tenant_name,
auth_url=self.conf.auth_url,
auth_strategy=self.conf.auth_strategy,
region_name=self.conf.auth_region,
token=self.auth_info.get('auth_token'),
insecure=self.conf.auth_insecure,
ca_cert=self.conf.auth_ca_cert,
endpoint_url=self.auth_info.get('endpoint_url'),
endpoint_type=self.conf.endpoint_type
)
return qclient
@webob.dec.wsgify(RequestClass=webob.Request)
def __call__(self, req):
try:
LOG.debug(_("Request: %s"), req)
instance_id, tenant_id = self._get_instance_and_tenant_id(req)
if instance_id:
return self._proxy_request(instance_id, tenant_id, req)
else:
return webob.exc.HTTPNotFound()
except Exception:
LOG.exception(_("Unexpected error."))
msg = _('An unknown error has occurred. '
'Please try your request again.')
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
@utils.cache_method_results
def _get_router_networks(self, router_id):
"""Find all networks connected to given router."""
qclient = self._get_neutron_client()
internal_ports = qclient.list_ports(
device_id=router_id,
device_owner=n_const.DEVICE_OWNER_ROUTER_INTF)['ports']
return tuple(p['network_id'] for p in internal_ports)
@utils.cache_method_results
def _get_ports_for_remote_address(self, remote_address, networks):
"""Get list of ports that has given ip address and are part of
given networks.
:param networks: list of networks in which the ip address will be
searched for
"""
qclient = self._get_neutron_client()
return qclient.list_ports(
network_id=networks,
fixed_ips=['ip_address=%s' % remote_address])['ports']
def _get_ports(self, remote_address, network_id=None, router_id=None):
"""Search for all ports that contain passed ip address and belongs to
given network.
If no network is passed ports are searched on all networks connected to
given router. Either one of network_id or router_id must be passed.
"""
if network_id:
networks = (network_id,)
elif router_id:
networks = self._get_router_networks(router_id)
else:
raise TypeError(_("Either one of parameter network_id or router_id"
" must be passed to _get_ports method."))
return self._get_ports_for_remote_address(remote_address, networks)
def _get_instance_and_tenant_id(self, req):
qclient = self._get_neutron_client()
remote_address = req.headers.get('X-Forwarded-For')
network_id = req.headers.get('X-Neutron-Network-ID')
router_id = req.headers.get('X-Neutron-Router-ID')
ports = self._get_ports(remote_address, network_id, router_id)
self.auth_info = qclient.get_auth_info()
if len(ports) == 1:
return ports[0]['device_id'], ports[0]['tenant_id']
return None, None
def _proxy_request(self, instance_id, tenant_id, req):
headers = {
'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
'X-Instance-ID': instance_id,
'X-Tenant-ID': tenant_id,
'X-Instance-ID-Signature': self._sign_instance_id(instance_id)
}
nova_ip_port = '%s:%s' % (self.conf.nova_metadata_ip,
self.conf.nova_metadata_port)
url = urlparse.urlunsplit((
self.conf.nova_metadata_protocol,
nova_ip_port,
req.path_info,
req.query_string,
''))
h = httplib2.Http(ca_certs=self.conf.auth_ca_cert,
disable_ssl_certificate_validation=
self.conf.nova_metadata_insecure)
if self.conf.nova_client_cert and self.conf.nova_client_priv_key:
h.add_certificate(self.conf.nova_client_priv_key,
self.conf.nova_client_cert,
nova_ip_port)
resp, content = h.request(url, method=req.method, headers=headers,
body=req.body)
if resp.status == 200:
LOG.debug(str(resp))
req.response.content_type = resp['content-type']
req.response.body = content
return req.response
elif resp.status == 403:
msg = _(
'The remote metadata server responded with Forbidden. This '
'response usually occurs when shared secrets do not match.'
)
LOG.warn(msg)
return webob.exc.HTTPForbidden()
elif resp.status == 404:
return webob.exc.HTTPNotFound()
elif resp.status == 409:
return webob.exc.HTTPConflict()
elif resp.status == 500:
msg = _(
'Remote metadata server experienced an internal server error.'
)
LOG.warn(msg)
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
else:
raise Exception(_('Unexpected response code: %s') % resp.status)
def _sign_instance_id(self, instance_id):
return hmac.new(self.conf.metadata_proxy_shared_secret,
instance_id,
hashlib.sha256).hexdigest()
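# Verification sketch (the consumer side is not part of this module): the
# metadata service recomputes the HMAC-SHA256 of the instance id with the
# same shared secret and compares it to the X-Instance-ID-Signature header.
def _verify_instance_id_example(shared_secret, instance_id, signature):
    expected = hmac.new(shared_secret, instance_id,
                        hashlib.sha256).hexdigest()
    # hmac.compare_digest (Python >= 2.7.7) avoids timing side channels
    return hmac.compare_digest(expected, signature)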
class UnixDomainHttpProtocol(eventlet.wsgi.HttpProtocol):
def __init__(self, request, client_address, server):
if client_address == '':
client_address = ('<local>', 0)
# base class is old-style, so super does not work properly
eventlet.wsgi.HttpProtocol.__init__(self, request, client_address,
server)
class WorkerService(wsgi.WorkerService):
def start(self):
self._server = self._service.pool.spawn(self._service._run,
self._application,
self._service._socket)
class UnixDomainWSGIServer(wsgi.Server):
def __init__(self, name):
self._socket = None
self._launcher = None
self._server = None
super(UnixDomainWSGIServer, self).__init__(name)
def start(self, application, file_socket, workers, backlog):
self._socket = eventlet.listen(file_socket,
family=socket.AF_UNIX,
backlog=backlog)
if workers < 1:
# For the case where only one process is required.
self._server = self.pool.spawn_n(self._run, application,
self._socket)
else:
# Minimize the cost of checking for child exit by extending the
# wait interval past the default of 0.01s.
self._launcher = service.ProcessLauncher(wait_interval=1.0)
self._server = WorkerService(self, application)
self._launcher.launch_service(self._server, workers=workers)
def _run(self, application, socket):
"""Start a WSGI service in a new green thread."""
logger = logging.getLogger('eventlet.wsgi.server')
eventlet.wsgi.server(socket,
application,
custom_pool=self.pool,
protocol=UnixDomainHttpProtocol,
log=logging.WritableLogger(logger))
class UnixDomainMetadataProxy(object):
OPTS = [
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location for Metadata Proxy UNIX domain socket')),
cfg.IntOpt('metadata_workers',
default=utils.cpu_count() // 2,
help=_('Number of separate worker processes for metadata '
'server')),
cfg.IntOpt('metadata_backlog',
default=4096,
help=_('Number of backlog requests to configure the '
'metadata server socket with'))
]
def __init__(self, conf):
self.conf = conf
dirname = os.path.dirname(cfg.CONF.metadata_proxy_socket)
if os.path.isdir(dirname):
try:
os.unlink(cfg.CONF.metadata_proxy_socket)
except OSError:
with excutils.save_and_reraise_exception() as ctxt:
if not os.path.exists(cfg.CONF.metadata_proxy_socket):
ctxt.reraise = False
else:
os.makedirs(dirname, 0o755)
self._init_state_reporting()
def _init_state_reporting(self):
self.context = context.get_admin_context_without_session()
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-metadata-agent',
'host': cfg.CONF.host,
'topic': 'N/A',
'configurations': {
'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket,
'nova_metadata_ip': cfg.CONF.nova_metadata_ip,
'nova_metadata_port': cfg.CONF.nova_metadata_port,
},
'start_flag': True,
'agent_type': n_const.AGENT_TYPE_METADATA}
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
try:
self.state_rpc.report_state(
self.context,
self.agent_state,
use_call=self.agent_state.get('start_flag'))
except AttributeError:
# This means the server does not support report_state
LOG.warn(_('Neutron server does not support state report.'
' State report for this agent will be disabled.'))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
return
self.agent_state.pop('start_flag', None)
def run(self):
server = UnixDomainWSGIServer('neutron-metadata-agent')
server.start(MetadataProxyHandler(self.conf),
self.conf.metadata_proxy_socket,
workers=self.conf.metadata_workers,
backlog=self.conf.metadata_backlog)
server.wait()
def main():
cfg.CONF.register_opts(UnixDomainMetadataProxy.OPTS)
cfg.CONF.register_opts(MetadataProxyHandler.OPTS)
cache.register_oslo_configs(cfg.CONF)
cfg.CONF.set_default(name='cache_url', default='memory://?default_ttl=5')
agent_conf.register_agent_state_opts_helper(cfg.CONF)
config.init(sys.argv[1:])
config.setup_logging(cfg.CONF)
utils.log_opt_values(LOG)
proxy = UnixDomainMetadataProxy(cfg.CONF)
proxy.run()


@ -1,184 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import httplib
import socket
import eventlet
eventlet.monkey_patch()
import httplib2
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob
from neutron.agent.linux import daemon
from neutron.common import config
from neutron.common import utils
from neutron.openstack.common import log as logging
from neutron import wsgi
LOG = logging.getLogger(__name__)
class UnixDomainHTTPConnection(httplib.HTTPConnection):
"""Connection class for HTTP over UNIX domain socket."""
def __init__(self, host, port=None, strict=None, timeout=None,
proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
def connect(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if self.timeout:
self.sock.settimeout(self.timeout)
self.sock.connect(cfg.CONF.metadata_proxy_socket)
class NetworkMetadataProxyHandler(object):
"""Proxy AF_INET metadata request through Unix Domain socket.
The Unix domain socket allows the proxy to access resources that are
not accessible within the isolated tenant context.
"""
def __init__(self, network_id=None, router_id=None):
self.network_id = network_id
self.router_id = router_id
if network_id is None and router_id is None:
msg = _('network_id and router_id are None. One must be provided.')
raise ValueError(msg)
@webob.dec.wsgify(RequestClass=webob.Request)
def __call__(self, req):
LOG.debug(_("Request: %s"), req)
try:
return self._proxy_request(req.remote_addr,
req.method,
req.path_info,
req.query_string,
req.body)
except Exception:
LOG.exception(_("Unexpected error."))
msg = _('An unknown error has occurred. '
'Please try your request again.')
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
def _proxy_request(self, remote_address, method, path_info,
query_string, body):
headers = {
'X-Forwarded-For': remote_address,
}
if self.router_id:
headers['X-Neutron-Router-ID'] = self.router_id
else:
headers['X-Neutron-Network-ID'] = self.network_id
url = urlparse.urlunsplit((
'http',
'169.254.169.254', # a dummy value to make the request proper
path_info,
query_string,
''))
h = httplib2.Http()
resp, content = h.request(
url,
method=method,
headers=headers,
body=body,
connection_type=UnixDomainHTTPConnection)
if resp.status == 200:
LOG.debug(resp)
LOG.debug(content)
response = webob.Response()
response.status = resp.status
response.headers['Content-Type'] = resp['content-type']
response.body = content
return response
elif resp.status == 404:
return webob.exc.HTTPNotFound()
elif resp.status == 409:
return webob.exc.HTTPConflict()
elif resp.status == 500:
msg = _(
'Remote metadata server experienced an internal server error.'
)
LOG.debug(msg)
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
else:
raise Exception(_('Unexpected response code: %s') % resp.status)
class ProxyDaemon(daemon.Daemon):
def __init__(self, pidfile, port, network_id=None, router_id=None):
uuid = network_id or router_id
super(ProxyDaemon, self).__init__(pidfile, uuid=uuid)
self.network_id = network_id
self.router_id = router_id
self.port = port
def run(self):
handler = NetworkMetadataProxyHandler(
self.network_id,
self.router_id)
proxy = wsgi.Server('neutron-network-metadata-proxy')
proxy.start(handler, self.port)
proxy.wait()
def main():
opts = [
cfg.StrOpt('network_id',
help=_('Network that will have instance metadata '
'proxied.')),
cfg.StrOpt('router_id',
help=_('Router that will have connected instances\' '
'metadata proxied.')),
cfg.StrOpt('pid_file',
help=_('Location of pid file of this process.')),
cfg.BoolOpt('daemonize',
default=True,
help=_('Run as daemon.')),
cfg.IntOpt('metadata_port',
default=9697,
help=_("TCP Port to listen for metadata server "
"requests.")),
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location of Metadata Proxy UNIX domain '
'socket'))
]
cfg.CONF.register_cli_opts(opts)
# Don't get the default configuration file
cfg.CONF(project='neutron', default_config_files=[])
config.setup_logging(cfg.CONF)
utils.log_opt_values(LOG)
proxy = ProxyDaemon(cfg.CONF.pid_file,
cfg.CONF.metadata_port,
network_id=cfg.CONF.network_id,
router_id=cfg.CONF.router_id)
if cfg.CONF.daemonize:
proxy.start()
else:
proxy.run()


@ -1,176 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import eventlet
eventlet.monkey_patch()
from oslo.config import cfg
from neutron.agent.common import config as agent_config
from neutron.agent import dhcp_agent
from neutron.agent import l3_agent
from neutron.agent.linux import dhcp
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
NS_MANGLING_PATTERN = ('(%s|%s)' % (dhcp.NS_PREFIX, l3_agent.NS_PREFIX) +
attributes.UUID_PATTERN)
class FakeDhcpPlugin(object):
"""Fake RPC plugin to bypass any RPC calls."""
def __getattribute__(self, name):
def fake_method(*args):
pass
return fake_method
def setup_conf():
"""Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.
"""
cli_opts = [
cfg.BoolOpt('force',
default=False,
help=_('Delete the namespace by removing all devices.')),
]
conf = cfg.CONF
conf.register_cli_opts(cli_opts)
agent_config.register_interface_driver_opts_helper(conf)
agent_config.register_use_namespaces_opts_helper(conf)
agent_config.register_root_helper(conf)
conf.register_opts(dhcp.OPTS)
conf.register_opts(dhcp_agent.DhcpAgent.OPTS)
conf.register_opts(interface.OPTS)
return conf
def kill_dhcp(conf, namespace):
"""Disable DHCP for a network if DHCP is still active."""
root_helper = agent_config.get_root_helper(conf)
network_id = namespace.replace(dhcp.NS_PREFIX, '')
dhcp_driver = importutils.import_object(
conf.dhcp_driver,
conf=conf,
network=dhcp.NetModel(conf.use_namespaces, {'id': network_id}),
root_helper=root_helper,
plugin=FakeDhcpPlugin())
if dhcp_driver.active:
dhcp_driver.disable()
def eligible_for_deletion(conf, namespace, force=False):
"""Determine whether a namespace is eligible for deletion.
Eligibility is determined by having only the lo device or if force
is passed as a parameter.
"""
# filter out namespaces without UUID as the name
if not re.match(NS_MANGLING_PATTERN, namespace):
return False
root_helper = agent_config.get_root_helper(conf)
ip = ip_lib.IPWrapper(root_helper, namespace)
return force or ip.namespace_is_empty()
def unplug_device(conf, device):
try:
device.link.delete()
except RuntimeError:
root_helper = agent_config.get_root_helper(conf)
# Maybe the device is an OVS port, so try to delete it from its bridge
bridge_name = ovs_lib.get_bridge_for_iface(root_helper, device.name)
if bridge_name:
bridge = ovs_lib.OVSBridge(bridge_name, root_helper)
bridge.delete_port(device.name)
else:
LOG.debug(_('Unable to find bridge for device: %s'), device.name)
def destroy_namespace(conf, namespace, force=False):
"""Destroy a given namespace.
If force is True, then dhcp (if it exists) will be disabled and all
devices will be forcibly removed.
"""
try:
root_helper = agent_config.get_root_helper(conf)
ip = ip_lib.IPWrapper(root_helper, namespace)
if force:
kill_dhcp(conf, namespace)
# NOTE: The dhcp driver will remove the namespace if it is empty,
# so a second check is required here.
if ip.netns.exists(namespace):
for device in ip.get_devices(exclude_loopback=True):
unplug_device(conf, device)
ip.garbage_collect_namespace()
except Exception:
LOG.exception(_('Unable to destroy namespace: %s'), namespace)
def main():
"""Main method for cleaning up network namespaces.
This method will make two passes checking for namespaces to delete. The
process will identify candidates, sleep, and call garbage collect. The
garbage collection will re-verify that the namespace meets the criteria for
deletion (i.e. it is empty). The period of sleep and the second pass allow
time for the namespace state to settle, so that the check prior to deletion
will re-confirm the namespace is empty.
The utility is designed to clean up after the forced or unexpected
termination of Neutron agents.
The --force flag should only be used as part of the cleanup of a devstack
installation as it will blindly purge namespaces and their devices. This
option also kills any lingering DHCP instances.
"""
conf = setup_conf()
conf()
config.setup_logging(conf)
root_helper = agent_config.get_root_helper(conf)
# Identify namespaces that are candidates for deletion.
candidates = [ns for ns in
ip_lib.IPWrapper.get_namespaces(root_helper)
if eligible_for_deletion(conf, ns, conf.force)]
if candidates:
eventlet.sleep(2)
for namespace in candidates:
destroy_namespace(conf, namespace, conf.force)


@ -1,112 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config as agent_config
from neutron.agent import l3_agent
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.common import config
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def setup_conf():
"""Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.
"""
opts = [
cfg.BoolOpt('ovs_all_ports',
default=False,
help=_('True to delete all ports on all the OpenvSwitch '
'bridges. False to delete ports created by '
'Neutron on integration and external network '
'bridges.'))
]
conf = cfg.CONF
conf.register_cli_opts(opts)
conf.register_opts(l3_agent.L3NATAgent.OPTS)
conf.register_opts(interface.OPTS)
agent_config.register_interface_driver_opts_helper(conf)
agent_config.register_use_namespaces_opts_helper(conf)
agent_config.register_root_helper(conf)
return conf
def collect_neutron_ports(bridges, root_helper):
"""Collect ports created by Neutron from OVS."""
ports = []
for bridge in bridges:
ovs = ovs_lib.OVSBridge(bridge, root_helper)
ports += [port.port_name for port in ovs.get_vif_ports()]
return ports
def delete_neutron_ports(ports, root_helper):
"""Delete non-internal ports created by Neutron
Non-internal OVS ports need to be removed manually.
"""
for port in ports:
if ip_lib.device_exists(port):
device = ip_lib.IPDevice(port, root_helper)
device.link.delete()
LOG.info(_("Delete %s"), port)
def main():
"""Main method for cleaning up OVS bridges.
The utility cleans up the integration bridges used by Neutron.
"""
conf = setup_conf()
conf()
config.setup_logging(conf)
configuration_bridges = set([conf.ovs_integration_bridge,
conf.external_network_bridge])
ovs_bridges = set(ovs_lib.get_bridges(conf.AGENT.root_helper))
available_configuration_bridges = configuration_bridges & ovs_bridges
if conf.ovs_all_ports:
bridges = ovs_bridges
else:
bridges = available_configuration_bridges
# Collect existing ports created by Neutron on configuration bridges.
# After deleting ports from OVS bridges, we cannot determine which
# ports were created by Neutron, so port information is collected now.
ports = collect_neutron_ports(available_configuration_bridges,
conf.AGENT.root_helper)
for bridge in bridges:
LOG.info(_("Cleaning %s"), bridge)
ovs = ovs_lib.OVSBridge(bridge, conf.AGENT.root_helper)
ovs.delete_ports(all_ports=conf.ovs_all_ports)
# Remove remaining ports created by Neutron (usually veth pair)
delete_neutron_ports(ports, conf.AGENT.root_helper)
LOG.info(_("OVS cleanup completed successfully"))


@ -1,303 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo.config import cfg
from neutron.common import topics
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
SG_RPC_VERSION = "1.1"
security_group_opts = [
cfg.StrOpt(
'firewall_driver',
help=_('Driver for security groups firewall in the L2 agent')),
cfg.BoolOpt(
'enable_security_group',
default=True,
help=_(
'Controls whether the neutron security group API is enabled '
'in the server. It should be false when using no security '
'groups or using the nova security group API.'))
]
cfg.CONF.register_opts(security_group_opts, 'SECURITYGROUP')
# This is a backward-compatibility check for Havana
def _is_valid_driver_combination():
return ((cfg.CONF.SECURITYGROUP.enable_security_group and
(cfg.CONF.SECURITYGROUP.firewall_driver and
cfg.CONF.SECURITYGROUP.firewall_driver !=
'neutron.agent.firewall.NoopFirewallDriver')) or
(not cfg.CONF.SECURITYGROUP.enable_security_group and
(cfg.CONF.SECURITYGROUP.firewall_driver ==
'neutron.agent.firewall.NoopFirewallDriver' or
cfg.CONF.SECURITYGROUP.firewall_driver is None)
))
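# Illustrative truth table for the check above: only two combinations
# are considered consistent:
#     enable_security_group=True  + a real (non-Noop) firewall_driver
#     enable_security_group=False + NoopFirewallDriver or no driver at all
# Anything else triggers the warnings in is_firewall_enabled() and
# init_firewall() below.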
def is_firewall_enabled():
if not _is_valid_driver_combination():
LOG.warn(_("Driver configuration doesn't match with "
"enable_security_group"))
return cfg.CONF.SECURITYGROUP.enable_security_group
def _disable_extension(extension, aliases):
if extension in aliases:
aliases.remove(extension)
def disable_security_group_extension_by_config(aliases):
if not is_firewall_enabled():
LOG.info(_('Disabled security-group extension.'))
_disable_extension('security-group', aliases)
LOG.info(_('Disabled allowed-address-pairs extension.'))
_disable_extension('allowed-address-pairs', aliases)
class SecurityGroupServerRpcApiMixin(object):
"""A mix-in that enable SecurityGroup support in plugin rpc."""
def security_group_rules_for_devices(self, context, devices):
LOG.debug(_("Get security group rules "
"for devices via rpc %r"), devices)
return self.call(context,
self.make_msg('security_group_rules_for_devices',
devices=devices),
version=SG_RPC_VERSION,
topic=self.topic)
class SecurityGroupAgentRpcCallbackMixin(object):
"""A mix-in that enable SecurityGroup agent
support in agent implementations.
"""
# the mix-in consumer is expected to set sg_agent
sg_agent = None
def _security_groups_agent_not_set(self):
LOG.warning(_("Security group agent binding currently not set. "
"This should be set by the end of the init "
"process."))
def security_groups_rule_updated(self, context, **kwargs):
"""Callback for security group rule update.
:param security_groups: list of updated security_groups
"""
security_groups = kwargs.get('security_groups', [])
LOG.debug(
_("Security group rule updated on remote: %s"), security_groups)
if not self.sg_agent:
return self._security_groups_agent_not_set()
self.sg_agent.security_groups_rule_updated(security_groups)
def security_groups_member_updated(self, context, **kwargs):
"""Callback for security group member update.
:param security_groups: list of updated security_groups
"""
security_groups = kwargs.get('security_groups', [])
LOG.debug(
_("Security group member updated on remote: %s"), security_groups)
if not self.sg_agent:
return self._security_groups_agent_not_set()
self.sg_agent.security_groups_member_updated(security_groups)
def security_groups_provider_updated(self, context, **kwargs):
"""Callback for security group provider update."""
LOG.debug(_("Provider rule updated"))
if not self.sg_agent:
return self._security_groups_agent_not_set()
self.sg_agent.security_groups_provider_updated()
class SecurityGroupAgentRpcMixin(object):
"""A mix-in that enable SecurityGroup agent
support in agent implementations.
"""
def init_firewall(self, defer_refresh_firewall=False):
firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver
LOG.debug(_("Init firewall settings (driver=%s)"), firewall_driver)
if not _is_valid_driver_combination():
LOG.warn(_("Driver configuration doesn't match "
"with enable_security_group"))
if not firewall_driver:
firewall_driver = 'neutron.agent.firewall.NoopFirewallDriver'
self.firewall = importutils.import_object(firewall_driver)
# The following flag will be set to true if port filter must not be
# applied as soon as a rule or membership notification is received
self.defer_refresh_firewall = defer_refresh_firewall
# Stores devices for which firewall should be refreshed when
# deferred refresh is enabled.
self.devices_to_refilter = set()
# Flag raised when a global refresh is needed
self.global_refresh_firewall = False
def prepare_devices_filter(self, device_ids):
if not device_ids:
return
LOG.info(_("Preparing filters for devices %s"), device_ids)
devices = self.plugin_rpc.security_group_rules_for_devices(
self.context, list(device_ids))
with self.firewall.defer_apply():
for device in devices.values():
self.firewall.prepare_port_filter(device)
def security_groups_rule_updated(self, security_groups):
LOG.info(_("Security group "
"rule updated %r"), security_groups)
self._security_group_updated(
security_groups,
'security_groups')
def security_groups_member_updated(self, security_groups):
LOG.info(_("Security group "
"member updated %r"), security_groups)
self._security_group_updated(
security_groups,
'security_group_source_groups')
def _security_group_updated(self, security_groups, attribute):
devices = []
sec_grp_set = set(security_groups)
for device in self.firewall.ports.values():
if sec_grp_set & set(device.get(attribute, [])):
devices.append(device['device'])
if devices:
if self.defer_refresh_firewall:
LOG.debug(_("Adding %s devices to the list of devices "
"for which firewall needs to be refreshed"),
devices)
self.devices_to_refilter |= set(devices)
else:
self.refresh_firewall(devices)
def security_groups_provider_updated(self):
LOG.info(_("Provider rule updated"))
if self.defer_refresh_firewall:
# NOTE(salv-orlando): A 'global refresh' might not be
# necessary if the subnet for which the provider rules
# were updated is known
self.global_refresh_firewall = True
else:
self.refresh_firewall()
def remove_devices_filter(self, device_ids):
if not device_ids:
return
LOG.info(_("Remove device filter for %r"), device_ids)
with self.firewall.defer_apply():
for device_id in device_ids:
device = self.firewall.ports.get(device_id)
if not device:
continue
self.firewall.remove_port_filter(device)
def refresh_firewall(self, device_ids=None):
LOG.info(_("Refresh firewall rules"))
if not device_ids:
device_ids = self.firewall.ports.keys()
if not device_ids:
LOG.info(_("No ports here to refresh firewall"))
return
devices = self.plugin_rpc.security_group_rules_for_devices(
self.context, device_ids)
with self.firewall.defer_apply():
for device in devices.values():
LOG.debug(_("Update port filter for %s"), device['device'])
self.firewall.update_port_filter(device)
def firewall_refresh_needed(self):
return self.global_refresh_firewall or self.devices_to_refilter
def setup_port_filters(self, new_devices, updated_devices):
"""Configure port filters for devices.
This routine applies filters for new devices and refreshes firewall
rules when devices have been updated, or when there are changes in
security group membership or rules.
:param new_devices: set containing identifiers for new devices
:param updated_devices: set containing identifiers for
updated devices
"""
if new_devices:
LOG.debug(_("Preparing device filters for %d new devices"),
len(new_devices))
self.prepare_devices_filter(new_devices)
# These data structures are cleared here in order to avoid
# losing updates occurring during firewall refresh
devices_to_refilter = self.devices_to_refilter
global_refresh_firewall = self.global_refresh_firewall
self.devices_to_refilter = set()
self.global_refresh_firewall = False
# TODO(salv-orlando): Avoid if possible ever performing the global
# refresh providing a precise list of devices for which firewall
# should be refreshed
if global_refresh_firewall:
LOG.debug(_("Refreshing firewall for all filtered devices"))
self.refresh_firewall()
else:
# If a device is both in new and updated devices
# avoid reprocessing it
updated_devices = ((updated_devices | devices_to_refilter) -
new_devices)
if updated_devices:
LOG.debug(_("Refreshing firewall for %d devices"),
len(updated_devices))
self.refresh_firewall(updated_devices)
class SecurityGroupAgentRpcApiMixin(object):
def _get_security_group_topic(self):
return topics.get_topic_name(self.topic,
topics.SECURITY_GROUP,
topics.UPDATE)
def security_groups_rule_updated(self, context, security_groups):
"""Notify rule updated security groups."""
if not security_groups:
return
self.fanout_cast(context,
self.make_msg('security_groups_rule_updated',
security_groups=security_groups),
version=SG_RPC_VERSION,
topic=self._get_security_group_topic())
def security_groups_member_updated(self, context, security_groups):
"""Notify member updated security groups."""
if not security_groups:
return
self.fanout_cast(context,
self.make_msg('security_groups_member_updated',
security_groups=security_groups),
version=SG_RPC_VERSION,
topic=self._get_security_group_topic())
def security_groups_provider_updated(self, context):
"""Notify provider updated security groups."""
self.fanout_cast(context,
self.make_msg('security_groups_provider_updated'),
version=SG_RPC_VERSION,
topic=self._get_security_group_topic())