Add GRE tunneling support for the ML2 plugin

This patch adds the GRE type driver, enabling the creation of GRE
tunnels with the OVS agent.
No endpoint IDs are managed; only endpoint IPs are stored in the DB,
and each IP is used as the endpoint ID by the OVS agent.
It also fixes bug 1201471.

Implements: blueprint ml2-gre
Change-Id: I1a33a4bd3ebc4c97eecf17a59ce16b8c2066ec66
mathieu-rohon 2013-07-16 13:24:25 +02:00
parent b99b020b29
commit 6bdfccaf1b
14 changed files with 607 additions and 47 deletions


@@ -3,7 +3,7 @@
 # (ListOpt) List of network type driver entrypoints to be loaded from
 # the quantum.ml2.type_drivers namespace.
 #
-# type_drivers = local,flat,vlan
+# type_drivers = local,flat,vlan,gre
 # Example: type_drivers = flat,vlan,gre

 # (ListOpt) Ordered list of network_types to allocate as tenant


@@ -95,7 +95,8 @@ class PluginApi(proxy.RpcProxy):
                                        agent_id=agent_id),
                          topic=self.topic)

-    def tunnel_sync(self, context, tunnel_ip):
+    def tunnel_sync(self, context, tunnel_ip, tunnel_type=None):
         return self.call(context,
-                         self.make_msg('tunnel_sync', tunnel_ip=tunnel_ip),
+                         self.make_msg('tunnel_sync', tunnel_ip=tunnel_ip,
+                                       tunnel_type=tunnel_type),
                          topic=self.topic)
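
As a usage note, a minimal sketch of how an L2 agent would call the extended RPC above; the plugin_rpc, context, local_ip and tunnel_types names are assumptions standing in for the agent's own attributes, and the {'tunnels': [...]} return shape follows the type_tunnel mixin added later in this commit.

    # Hedged sketch: report the local endpoint and learn the peers per tunnel type.
    for tunnel_type in tunnel_types:                       # e.g. ['gre']
        details = plugin_rpc.tunnel_sync(context, local_ip, tunnel_type)
        for tunnel in details['tunnels']:                  # endpoints known to the plugin
            if tunnel['ip_address'] != local_ip:
                print('peer %s endpoint: %s' % (tunnel_type, tunnel['ip_address']))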


@@ -0,0 +1,66 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""DB Migration for ML2 GRE Type Driver
Revision ID: 20ae61555e95
Revises: 13de305df56e
Create Date: 2013-07-10 17:19:03.021937
"""
# revision identifiers, used by Alembic.
revision = '20ae61555e95'
down_revision = '13de305df56e'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.ml2.plugin.Ml2Plugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.create_table(
'ml2_gre_allocations',
sa.Column('gre_id', sa.Integer, nullable=False,
autoincrement=False),
sa.Column('allocated', sa.Boolean, nullable=False),
sa.PrimaryKeyConstraint('gre_id')
)
op.create_table(
'ml2_gre_endpoints',
sa.Column('ip_address', sa.String(length=64)),
sa.PrimaryKeyConstraint('ip_address')
)
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('ml2_gre_allocations')
op.drop_table('ml2_gre_endpoints')


@@ -20,7 +20,7 @@ from neutron import scheduler

 ml2_opts = [
     cfg.ListOpt('type_drivers',
-                default=['local', 'flat', 'vlan'],
+                default=['local', 'flat', 'vlan', 'gre'],
                 help=_("List of network type driver entrypoints to be loaded "
                        "from the neutron.ml2.type_drivers namespace.")),
     cfg.ListOpt('tenant_network_types',
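
As a usage note, a small sketch of how these options surface at runtime once gre is listed in ml2_conf.ini; the [ml2] group name and the config-file path are assumptions based on the hunk above (the option group registration is not shown here).

    # Hedged sketch: reading the ML2 type driver options through oslo.config.
    from oslo.config import cfg
    from neutron.plugins.ml2 import config  # noqa: registers the ml2 options shown above

    cfg.CONF(['--config-file', '/etc/neutron/plugins/ml2/ml2_conf.ini'])  # hypothetical path
    print(cfg.CONF.ml2.type_drivers)           # e.g. ['local', 'flat', 'vlan', 'gre']
    print(cfg.CONF.ml2.tenant_network_types)   # e.g. ['gre']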


@@ -0,0 +1,212 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy.orm import exc as sa_exc
from neutron.common import exceptions as exc
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.openstack.common import log
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel
LOG = log.getLogger(__name__)
gre_opts = [
cfg.ListOpt('tunnel_id_ranges',
default=[],
help=_("Comma-separated list of <tun_min>:<tun_max> tuples "
"enumerating ranges of GRE tunnel IDs that are "
"available for tenant network allocation"))
]
cfg.CONF.register_opts(gre_opts, "ml2_type_gre")
class GreAllocation(model_base.BASEV2):
__tablename__ = 'ml2_gre_allocations'
gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False)
class GreEndpoints(model_base.BASEV2):
"""Represents tunnel endpoint in RPC mode."""
__tablename__ = 'ml2_gre_endpoints'
ip_address = sa.Column(sa.String(64), primary_key=True)
def __repr__(self):
return "<GreTunnelEndpoint(%s)>" % self.ip_address
class GreTypeDriver(api.TypeDriver,
type_tunnel.TunnelTypeDriver):
def get_type(self):
return type_tunnel.TYPE_GRE
def initialize(self):
self.gre_id_ranges = []
self._parse_gre_id_ranges()
self._sync_gre_allocations()
def validate_provider_segment(self, segment):
physical_network = segment.get(api.PHYSICAL_NETWORK)
if physical_network:
msg = _("provider:physical_network specified for GRE "
"network")
raise exc.InvalidInput(error_message=msg)
segmentation_id = segment.get(api.SEGMENTATION_ID)
if not segmentation_id:
msg = _("segmentation_id required for GRE provider network")
raise exc.InvalidInput(error_message=msg)
def reserve_provider_segment(self, session, segment):
segmentation_id = segment.get(api.SEGMENTATION_ID)
with session.begin(subtransactions=True):
try:
alloc = (session.query(GreAllocation).
filter_by(gre_id=segmentation_id).
with_lockmode('update').
one())
if alloc.allocated:
raise exc.TunnelIdInUse(tunnel_id=segmentation_id)
LOG.debug(_("Reserving specific gre tunnel %s from pool"),
segmentation_id)
alloc.allocated = True
except sa_exc.NoResultFound:
LOG.debug(_("Reserving specific gre tunnel %s outside pool"),
segmentation_id)
alloc = GreAllocation(gre_id=segmentation_id)
alloc.allocated = True
session.add(alloc)
def allocate_tenant_segment(self, session):
with session.begin(subtransactions=True):
alloc = (session.query(GreAllocation).
filter_by(allocated=False).
with_lockmode('update').
first())
if alloc:
LOG.debug(_("Allocating gre tunnel id %(gre_id)s"),
{'gre_id': alloc.gre_id})
alloc.allocated = True
return {api.NETWORK_TYPE: type_tunnel.TYPE_GRE,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: alloc.gre_id}
def release_segment(self, session, segment):
gre_id = segment[api.SEGMENTATION_ID]
with session.begin(subtransactions=True):
try:
alloc = (session.query(GreAllocation).
filter_by(gre_id=gre_id).
with_lockmode('update').
one())
alloc.allocated = False
for lo, hi in self.gre_id_ranges:
if lo <= gre_id <= hi:
LOG.debug(_("Releasing gre tunnel %s to pool"),
gre_id)
break
else:
session.delete(alloc)
LOG.debug(_("Releasing gre tunnel %s outside pool"),
gre_id)
except sa_exc.NoResultFound:
LOG.warning(_("gre_id %s not found"), gre_id)
def _parse_gre_id_ranges(self):
for entry in cfg.CONF.ml2_type_gre.tunnel_id_ranges:
entry = entry.strip()
try:
tun_min, tun_max = entry.split(':')
tun_min = tun_min.strip()
tun_max = tun_max.strip()
self.gre_id_ranges.append((int(tun_min), int(tun_max)))
except ValueError as ex:
LOG.error(_("Invalid tunnel ID range: '%(range)s' - %(e)s. "
"Agent terminated!"),
{'range': cfg.CONF.ml2_type_gre.tunnel_id_ranges,
'e': ex})
LOG.info(_("gre ID ranges: %s"), self.gre_id_ranges)
def _sync_gre_allocations(self):
"""Synchronize gre_allocations table with configured tunnel ranges."""
# determine current configured allocatable gres
gre_ids = set()
for gre_id_range in self.gre_id_ranges:
tun_min, tun_max = gre_id_range
if tun_max + 1 - tun_min > 1000000:
LOG.error(_("Skipping unreasonable gre ID range "
"%(tun_min)s:%(tun_max)s"),
{'tun_min': tun_min, 'tun_max': tun_max})
else:
gre_ids |= set(xrange(tun_min, tun_max + 1))
session = db_api.get_session()
with session.begin(subtransactions=True):
# remove from table unallocated tunnels not currently allocatable
allocs = (session.query(GreAllocation).all())
for alloc in allocs:
try:
# see if tunnel is allocatable
gre_ids.remove(alloc.gre_id)
except KeyError:
# it's not allocatable, so check if its allocated
if not alloc.allocated:
# it's not, so remove it from table
LOG.debug(_("Removing tunnel %s from pool"),
alloc.gre_id)
session.delete(alloc)
# add missing allocatable tunnels to table
for gre_id in sorted(gre_ids):
alloc = GreAllocation(gre_id=gre_id)
session.add(alloc)
def get_gre_allocation(self, session, gre_id):
return session.query(GreAllocation).filter_by(gre_id=gre_id).first()
def get_endpoints(self):
"""Get every gre endpoints from database."""
LOG.debug(_("get_gre_endpoints() called"))
session = db_api.get_session()
with session.begin(subtransactions=True):
gre_endpoints = session.query(GreEndpoints)
return [{'ip_address': gre_endpoint.ip_address}
for gre_endpoint in gre_endpoints]
def add_endpoint(self, ip):
LOG.debug(_("add_gre_endpoint() called for ip %s"), ip)
session = db_api.get_session()
with session.begin(subtransactions=True):
try:
gre_endpoint = (session.query(GreEndpoints).
filter_by(ip_address=ip).one())
LOG.warning(_("Gre endpoint with ip %s already exists"), ip)
except sa_exc.NoResultFound:
gre_endpoint = GreEndpoints(ip_address=ip)
session.add(gre_endpoint)
return gre_endpoint
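
To make the tunnel_id_ranges handling above concrete, here is a small standalone sketch of the same parsing and pool arithmetic, without the database; the helper names are illustrative and are not part of the driver.

    # Hedged sketch mirroring _parse_gre_id_ranges() and the range check in
    # _sync_gre_allocations(): "100:109" style entries become (min, max) tuples
    # and then a set of allocatable GRE IDs, skipping unreasonably large ranges.
    def parse_ranges(entries):                     # illustrative helper only
        ranges = []
        for entry in entries:
            tun_min, tun_max = entry.strip().split(':')
            ranges.append((int(tun_min), int(tun_max)))
        return ranges

    def allocatable_ids(ranges, limit=1000000):    # illustrative helper only
        ids = set()
        for tun_min, tun_max in ranges:
            if tun_max + 1 - tun_min > limit:
                continue                           # mirrors the "unreasonable range" skip
            ids |= set(range(tun_min, tun_max + 1))
        return ids

    print(parse_ranges(['100:109', '500:600']))    # [(100, 109), (500, 600)]
    print(sorted(allocatable_ids([(100, 104)])))   # [100, 101, 102, 103, 104]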


@@ -0,0 +1,98 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import ABCMeta, abstractmethod
from neutron.common import exceptions as exc
from neutron.common import topics
from neutron.openstack.common import log
LOG = log.getLogger(__name__)
TUNNEL = 'tunnel'
TYPE_GRE = 'gre'
class TunnelTypeDriver(object):
"""Define stable abstract interface for ML2 type drivers.
tunnel type networks rely on tunnel endpoints. This class defines abstract
methods to manage these endpoints.
"""
__metaclass__ = ABCMeta
@abstractmethod
def add_endpoint(self, ip):
"""Register the endpoint in the type_driver database.
param ip: the ip of the endpoint
"""
pass
@abstractmethod
def get_endpoints(self):
"""Get every endpoint managed by the type_driver
:returns a list of dict [{id:endpoint_id, ip_address:endpoint_ip},..]
"""
pass
class TunnelRpcCallbackMixin(object):
def __init__(self, notifier, type_manager):
self.notifier = notifier
self.type_manager = type_manager
def tunnel_sync(self, rpc_context, **kwargs):
"""Update new tunnel.
Updates the database with the tunnel IP. All listening agents will also
be notified about the new tunnel IP.
"""
tunnel_ip = kwargs.get('tunnel_ip')
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
msg = "network_type value needed by the ML2 plugin"
raise exc.InvalidInput(error_message=msg)
driver = self.type_manager.drivers.get(tunnel_type)
if driver:
tunnel = driver.obj.add_endpoint(tunnel_ip)
tunnels = driver.obj.get_endpoints()
entry = {'tunnels': tunnels}
# Notify all other listening agents
self.notifier.tunnel_update(rpc_context, tunnel.ip_address,
tunnel_type)
# Return the list of tunnels IP's to the agent
return entry
else:
msg = _("network_type value '%s' not supported") % tunnel_type
raise exc.InvalidInput(error_message=msg)
class TunnelAgentRpcApiMixin(object):
def _get_tunnel_update_topic(self):
return topics.get_topic_name(self.topic,
TUNNEL,
topics.UPDATE)
def tunnel_update(self, context, tunnel_ip, tunnel_type):
self.fanout_cast(context,
self.make_msg('tunnel_update',
tunnel_ip=tunnel_ip,
tunnel_type=tunnel_type),
topic=self._get_tunnel_update_topic())
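
To illustrate the endpoint contract that TunnelTypeDriver defines, here is a minimal in-memory implementation; the class below is illustrative only and is not part of the commit, whereas the real GRE driver persists endpoints in the ml2_gre_endpoints table.

    # Hedged sketch: the smallest possible endpoint store with the same
    # add_endpoint()/get_endpoints() shapes used by the RPC mixin above.
    class InMemoryTunnelEndpoints(object):         # illustrative, not in the commit
        def __init__(self):
            self._ips = set()

        def add_endpoint(self, ip):
            self._ips.add(ip)
            return ip

        def get_endpoints(self):
            return [{'ip_address': ip} for ip in sorted(self._ips)]

    store = InMemoryTunnelEndpoints()
    store.add_endpoint('10.10.10.10')
    store.add_endpoint('10.10.10.20')
    print(store.get_endpoints())
    # [{'ip_address': '10.10.10.10'}, {'ip_address': '10.10.10.20'}]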


@@ -109,7 +109,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         self.notifier = rpc.AgentNotifierApi(topics.AGENT)
         self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
         self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotify
-        self.callbacks = rpc.RpcCallbacks(self.notifier)
+        self.callbacks = rpc.RpcCallbacks(self.notifier, self.type_manager)
         self.topic = topics.PLUGIN
         self.conn = c_rpc.create_connection(new=True)
         self.dispatcher = self.callbacks.create_rpc_dispatcher()


@@ -26,6 +26,9 @@ from neutron.openstack.common import log
 from neutron.openstack.common.rpc import proxy
 from neutron.plugins.ml2 import db
 from neutron.plugins.ml2 import driver_api as api
+from neutron.plugins.ml2.drivers import type_tunnel
+# REVISIT(kmestery): Allow the type and mechanism drivers to supply the
+# mixins and eventually remove the direct dependencies on type_tunnel.

 LOG = log.getLogger(__name__)
@@ -35,15 +38,20 @@ TAP_DEVICE_PREFIX_LENGTH = 3

 class RpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
                    l3_rpc_base.L3RpcCallbackMixin,
-                   sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
+                   sg_db_rpc.SecurityGroupServerRpcCallbackMixin,
+                   type_tunnel.TunnelRpcCallbackMixin):

     RPC_API_VERSION = '1.1'
     # history
     # 1.0 Initial version (from openvswitch/linuxbridge)
     # 1.1 Support Security Group RPC

-    def __init__(self, notifier):
-        self.notifier = notifier
+    def __init__(self, notifier, type_manager):
+        # REVISIT(kmestery): This depends on the first three super classes
+        # not having their own __init__ functions. If an __init__() is added
+        # to one, this could break. Fix this and add a unit test to cover this
+        # test in H3.
+        super(RpcCallbacks, self).__init__(notifier, type_manager)

     def create_rpc_dispatcher(self):
         '''Get the rpc dispatcher for this manager.
@@ -156,12 +164,10 @@ class RpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
         if port.status != q_const.PORT_STATUS_ACTIVE:
             port.status = q_const.PORT_STATUS_ACTIVE

-    # TODO(rkukura) Add tunnel_sync() here if not implemented via a
-    # driver.


 class AgentNotifierApi(proxy.RpcProxy,
-                       sg_rpc.SecurityGroupAgentRpcApiMixin):
+                       sg_rpc.SecurityGroupAgentRpcApiMixin,
+                       type_tunnel.TunnelAgentRpcApiMixin):
     """Agent side of the openvswitch rpc API.

     API version history:
@@ -183,9 +189,6 @@ class AgentNotifierApi(proxy.RpcProxy,
                                                   topics.PORT,
                                                   topics.UPDATE)

-        # TODO(rkukura): Add topic_tunnel_update here if not
-        # implemented via a driver.
-
     def network_delete(self, context, network_id):
         self.fanout_cast(context,
                          self.make_msg('network_delete',
@@ -201,6 +204,3 @@ class AgentNotifierApi(proxy.RpcProxy,
                                        segmentation_id=segmentation_id,
                                        physical_network=physical_network),
                          topic=self.topic_port_update)
-
-        # TODO(rkukura): Add tunnel_update() here if not
-        # implemented via a driver.
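
The REVISIT note about __init__ above can be illustrated with a stripped-down sketch: super() only reaches the tunnel mixin's __init__ because the mixins listed before it define no __init__ of their own. The class names below are placeholders, not Neutron classes.

    # Hedged sketch of the MRO concern: cooperative __init__ across mixins.
    class DhcpMixin(object):                       # stands in for the first three mixins
        pass

    class SgMixin(object):
        pass

    class TunnelMixin(object):                     # stands in for TunnelRpcCallbackMixin
        def __init__(self, notifier, type_manager):
            self.notifier = notifier
            self.type_manager = type_manager

    class Callbacks(DhcpMixin, SgMixin, TunnelMixin):
        def __init__(self, notifier, type_manager):
            # Reaches TunnelMixin.__init__ only because the earlier mixins have
            # no __init__; if one of them gains an __init__, this call breaks.
            super(Callbacks, self).__init__(notifier, type_manager)

    cb = Callbacks('notifier', 'type_manager')
    print('%s %s' % (cb.notifier, cb.type_manager))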


@@ -289,7 +289,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
         if not self.enable_tunneling:
             return
         tunnel_ip = kwargs.get('tunnel_ip')
-        tunnel_id = kwargs.get('tunnel_id')
+        tunnel_id = kwargs.get('tunnel_id', tunnel_ip)
+        if not tunnel_id:
+            tunnel_id = tunnel_ip
         tunnel_type = kwargs.get('tunnel_type')
         if not tunnel_type:
             LOG.error(_("No tunnel_type specified, cannot create tunnels"))
@@ -700,19 +702,19 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
     def tunnel_sync(self):
         resync = False
         try:
-            details = self.plugin_rpc.tunnel_sync(self.context, self.local_ip)
-            tunnels = details['tunnels']
-            for tunnel in tunnels:
-                if self.local_ip != tunnel['ip_address']:
-                    tunnel_type = tunnel.get('tunnel_type')
-                    if not tunnel_type:
-                        LOG.error(_('No tunnel_type specified, cannot add '
-                                    'tunnel port'))
-                        return
-                    tun_name = '%s-%s' % (tunnel_type, tunnel['id'])
-                    self.tun_br.add_tunnel_port(tun_name, tunnel['ip_address'],
-                                                tunnel_type,
-                                                self.vxlan_udp_port)
+            for tunnel_type in self.tunnel_types:
+                details = self.plugin_rpc.tunnel_sync(self.context,
+                                                      self.local_ip,
+                                                      tunnel_type)
+                tunnels = details['tunnels']
+                for tunnel in tunnels:
+                    if self.local_ip != tunnel['ip_address']:
+                        tunnel_id = tunnel.get('id', tunnel['ip_address'])
+                        tun_name = '%s-%s' % (tunnel_type, tunnel_id)
+                        self.tun_br.add_tunnel_port(tun_name,
+                                                    tunnel['ip_address'],
+                                                    tunnel_type,
+                                                    self.vxlan_udp_port)
         except Exception as e:
             LOG.debug(_("Unable to sync tunnel IP %(local_ip)s: %(e)s"),
                       {'local_ip': self.local_ip, 'e': e})
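
For clarity, the port-naming fallback introduced above in isolation: because the GRE type driver stores only endpoint IPs, the plugin supplies no separate tunnel id, so the agent falls back to the remote IP and names the OVS port e.g. gre-10.0.0.2. A tiny sketch, with the dict shape taken from get_endpoints() earlier in this commit.

    # Hedged sketch of the tunnel port naming used by tunnel_sync()/tunnel_update().
    def tunnel_port_name(tunnel_type, tunnel):
        tunnel_id = tunnel.get('id', tunnel['ip_address'])  # fall back to the IP
        return '%s-%s' % (tunnel_type, tunnel_id)

    print(tunnel_port_name('gre', {'ip_address': '10.0.0.2'}))            # gre-10.0.0.2
    print(tunnel_port_name('gre', {'id': 7, 'ip_address': '10.0.0.2'}))   # gre-7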


@@ -123,4 +123,5 @@ class rpcHyperVApiTestCase(base.BaseTestCase):
         self._test_hyperv_neutron_api(
             rpcapi, topics.PLUGIN,
             'tunnel_sync', rpc_method='call',
-            tunnel_ip='fake_tunnel_ip')
+            tunnel_ip='fake_tunnel_ip',
+            tunnel_type=None)


@@ -23,6 +23,7 @@ from neutron.agent import rpc as agent_rpc
 from neutron.common import topics
 from neutron.openstack.common import context
 from neutron.openstack.common import rpc
+from neutron.plugins.ml2.drivers import type_tunnel
 from neutron.plugins.ml2 import rpc as plugin_rpc
 from neutron.tests import base
@@ -71,14 +72,14 @@ class RpcApiTestCase(base.BaseTestCase):
                            segmentation_id='fake_segmentation_id',
                            physical_network='fake_physical_network')

-#    def test_tunnel_update(self):
-#        rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
-#        self._test_rpc_api(rpcapi,
-#                           topics.get_topic_name(topics.AGENT,
-#                                                 constants.TUNNEL,
-#                                                 topics.UPDATE),
-#                           'tunnel_update', rpc_method='fanout_cast',
-#                           tunnel_ip='fake_ip', tunnel_id='fake_id')
+    def test_tunnel_update(self):
+        rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT)
+        self._test_rpc_api(rpcapi,
+                           topics.get_topic_name(topics.AGENT,
+                                                 type_tunnel.TUNNEL,
+                                                 topics.UPDATE),
+                           'tunnel_update', rpc_method='fanout_cast',
+                           tunnel_ip='fake_ip', tunnel_type='gre')

     def test_device_details(self):
         rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
@@ -94,11 +95,12 @@ class RpcApiTestCase(base.BaseTestCase):
                            device='fake_device',
                            agent_id='fake_agent_id')

-#    def test_tunnel_sync(self):
-#        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
-#        self._test_rpc_api(rpcapi, topics.PLUGIN,
-#                           'tunnel_sync', rpc_method='call',
-#                           tunnel_ip='fake_tunnel_ip')
+    def test_tunnel_sync(self):
+        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
+        self._test_rpc_api(rpcapi, topics.PLUGIN,
+                           'tunnel_sync', rpc_method='call',
+                           tunnel_ip='fake_tunnel_ip',
+                           tunnel_type=None)

     def test_update_device_up(self):
         rpcapi = agent_rpc.PluginApi(topics.PLUGIN)


@@ -0,0 +1,176 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from testtools import matchers
from neutron.common import exceptions as exc
import neutron.db.api as db
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_gre
from neutron.tests import base
TUNNEL_IP_ONE = "10.10.10.10"
TUNNEL_IP_TWO = "10.10.10.20"
TUN_MIN = 100
TUN_MAX = 109
TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
class GreTypeTest(base.BaseTestCase):
def setUp(self):
super(GreTypeTest, self).setUp()
ml2_db.initialize()
self.driver = type_gre.GreTypeDriver()
self.driver.gre_id_ranges = TUNNEL_RANGES
self.driver._sync_gre_allocations()
self.session = db.get_session()
def test_validate_provider_segment(self):
segment = {api.NETWORK_TYPE: 'gre',
api.PHYSICAL_NETWORK: 'phys_net',
api.SEGMENTATION_ID: None}
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
segment[api.PHYSICAL_NETWORK] = None
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
def test_sync_tunnel_allocations(self):
self.assertIsNone(
self.driver.get_gre_allocation(self.session,
(TUN_MIN - 1))
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MIN)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MIN + 1)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MAX - 1)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MAX)).allocated
)
self.assertIsNone(
self.driver.get_gre_allocation(self.session,
(TUN_MAX + 1))
)
self.driver.gre_id_ranges = UPDATED_TUNNEL_RANGES
self.driver._sync_gre_allocations()
self.assertIsNone(
self.driver.get_gre_allocation(self.session,
(TUN_MIN + 5 - 1))
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MIN + 5)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MIN + 5 + 1)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MAX + 5 - 1)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MAX + 5)).allocated
)
self.assertIsNone(
self.driver.get_gre_allocation(self.session,
(TUN_MAX + 5 + 1))
)
def test_reserve_provider_segment(self):
segment = {api.NETWORK_TYPE: 'gre',
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: 101}
self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_gre_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
with testtools.ExpectedException(exc.TunnelIdInUse):
self.driver.reserve_provider_segment(self.session, segment)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_gre_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertFalse(alloc.allocated)
segment[api.SEGMENTATION_ID] = 1000
self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_gre_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_gre_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertEqual(None, alloc)
def test_allocate_tenant_segment(self):
tunnel_ids = set()
for x in xrange(TUN_MIN, TUN_MAX + 1):
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
segment = self.driver.allocate_tenant_segment(self.session)
self.assertEqual(None, segment)
segment = {api.NETWORK_TYPE: 'gre',
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: tunnel_ids.pop()}
self.driver.release_segment(self.session, segment)
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
for tunnel_id in tunnel_ids:
segment[api.SEGMENTATION_ID] = tunnel_id
self.driver.release_segment(self.session, segment)
def test_gre_endpoints(self):
tun_1 = self.driver.add_endpoint(TUNNEL_IP_ONE)
tun_2 = self.driver.add_endpoint(TUNNEL_IP_TWO)
self.assertEqual(TUNNEL_IP_ONE, tun_1.ip_address)
self.assertEqual(TUNNEL_IP_TWO, tun_2.ip_address)
# Get all the endpoints
endpoints = self.driver.get_endpoints()
for endpoint in endpoints:
self.assertIn(endpoint['ip_address'],
[TUNNEL_IP_ONE, TUNNEL_IP_TWO])


@@ -108,7 +108,8 @@ class rpcApiTestCase(base.BaseTestCase):
         rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
         self._test_ovs_api(rpcapi, topics.PLUGIN,
                            'tunnel_sync', rpc_method='call',
-                           tunnel_ip='fake_tunnel_ip')
+                           tunnel_ip='fake_tunnel_ip',
+                           tunnel_type=None)

     def test_update_device_up(self):
         rpcapi = agent_rpc.PluginApi(topics.PLUGIN)


@@ -107,6 +107,7 @@ neutron.ml2.type_drivers =
     flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver
     local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver
     vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver
+    gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver
 neutron.ml2.mechanism_drivers =
     logger = neutron.tests.unit.ml2.drivers.mechanism_logger:LoggerMechanismDriver
     test = neutron.tests.unit.ml2.drivers.mechanism_test:TestMechanismDriver
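
As a closing note, a hedged sketch of how the new entry point is picked up: ML2 loads type drivers from the neutron.ml2.type_drivers namespace with stevedore, so the gre line above plus type_drivers = ...,gre in the configuration is what makes GreTypeDriver loadable. The names list below is illustrative; it would normally come from cfg.CONF.ml2.type_drivers.

    # Hedged sketch: loading the 'gre' type driver through its entry point.
    from stevedore import named

    manager = named.NamedExtensionManager(
        namespace='neutron.ml2.type_drivers',
        names=['flat', 'vlan', 'gre'],             # illustrative; normally from config
        invoke_on_load=True)
    for ext in manager:
        print('%s -> %r' % (ext.name, ext.obj))    # e.g. 'gre' -> GreTypeDriver instance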