Group Policy: Resource Mapping Driver

(Patch series identifier: GPM-RMD-1)

This patch adds the resource_mapping group policy driver, which
renders group policy via mappings to traditional neutron resources,
including networks, subnets, ports, and routers. This initial version
of the driver does not yet handle policy rule resources, so all
traffic is allowed between endpoint groups that share an L3 policy.

Partially-implements: blueprint group-based-policy-abstraction

Author:            Bob Kukura <kukura@noironetworks.com>
Co-Authored-By:    Sumit Naiksatam <sumitnaiksatam@gmail.com>
Co-Authored-By:    Stephen Wong <s3wong@midokura.com>
Co-Authored-By:    Mohammad Banikazemi <mb@us.ibm.com>
Co-Authored-By:    Mandeep Dhami <dhami@noironetworks.com>

Change-Id: Ib4ca4fc9ae63f0f1295c95c01fd712ecc1ebb082
This commit is contained in:
Sumit Naiksatam
2014-09-29 18:35:45 -04:00
parent 3166344721
commit 3956e8c4ff
13 changed files with 1105 additions and 9 deletions

View File

@@ -103,6 +103,32 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
res['routers'] = [router.router_id for router in l3p.routers]
return self._fields(res, fields)
def _set_port_for_endpoint(self, context, ep_id, port_id):
    """Record the neutron port backing the given endpoint."""
    with context.session.begin(subtransactions=True):
        endpoint = self._get_endpoint(context, ep_id)
        endpoint.port_id = port_id
def _add_subnet_to_endpoint_group(self, context, epg_id, subnet_id):
    """Associate subnet_id with the EPG; return its full subnet id list."""
    with context.session.begin(subtransactions=True):
        epg = self._get_endpoint_group(context, epg_id)
        epg.subnets.append(
            EndpointGroupSubnetAssociation(endpoint_group_id=epg_id,
                                           subnet_id=subnet_id))
    return [s.subnet_id for s in epg.subnets]
def _set_network_for_l2_policy(self, context, l2p_id, network_id):
    """Record the neutron network backing the given L2 policy."""
    with context.session.begin(subtransactions=True):
        l2_policy = self._get_l2_policy(context, l2p_id)
        l2_policy.network_id = network_id
def _add_router_to_l3_policy(self, context, l3p_id, router_id):
    """Associate router_id with the L3 policy; return its router id list."""
    with context.session.begin(subtransactions=True):
        l3_policy = self._get_l3_policy(context, l3p_id)
        l3_policy.routers.append(
            L3PolicyRouterAssociation(l3_policy_id=l3p_id,
                                      router_id=router_id))
    return [r.router_id for r in l3_policy.routers]
@log.log
def create_endpoint(self, context, endpoint):
ep = endpoint['endpoint']

View File

@@ -0,0 +1,73 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Group Policy Resource Mapping Driver (gpm_rmd_1)
Revision ID: 1bf7555fa01a
Revises: 23b6c4d703c7
Create Date: 2014-07-24 16:12:22.610815
"""
# revision identifiers, used by Alembic.
revision = '1bf7555fa01a'
down_revision = '23b6c4d703c7'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the gpm_owned_* tables tracking driver-created resources.

    Each table holds just the id of a neutron resource (network, port,
    subnet, router) that the resource_mapping driver created implicitly,
    so the driver knows what it is responsible for deleting.
    """
    # Every table has the same shape: a single FK/PK column with
    # ON DELETE CASCADE so the marker vanishes with the resource.
    for table, column, referent in (
            ('gpm_owned_networks', 'network_id', 'networks.id'),
            ('gpm_owned_ports', 'port_id', 'ports.id'),
            ('gpm_owned_subnets', 'subnet_id', 'subnets.id'),
            ('gpm_owned_routers', 'router_id', 'routers.id')):
        op.create_table(
            table,
            sa.Column(column, sa.String(length=36), nullable=False),
            sa.ForeignKeyConstraint([column], [referent],
                                    ondelete='CASCADE'),
            sa.PrimaryKeyConstraint(column)
        )
def downgrade():
    """Drop the gpm_owned_* tables in reverse order of creation."""
    for table in ('gpm_owned_routers', 'gpm_owned_subnets',
                  'gpm_owned_ports', 'gpm_owned_networks'):
        op.drop_table(table)

View File

@@ -1 +1 @@
23b6c4d703c7
1bf7555fa01a

View File

@@ -18,3 +18,45 @@ from neutron.common import exceptions
class GroupPolicyDriverError(exceptions.NeutronException):
    """Policy driver call failed.

    Raised when a policy driver's %(method)s call raises unexpectedly.
    """
    message = _("%(method)s failed.")
class GroupPolicyException(exceptions.NeutronException):
    """Base for policy driver exceptions returned to user.

    Subclasses are propagated to the API caller instead of being
    treated as internal driver failures.
    """
    pass
class GroupPolicyDeploymentError(GroupPolicyException):
    """Raised when the deployment lacks required configuration."""
    message = _("Deployment not configured properly. See logs for details.")
class GroupPolicyInternalError(GroupPolicyException):
    """Raised on unexpected internal failures during resource mapping."""
    message = _("Unexpected internal failure. See logs for details.")
class GroupPolicyBadRequest(exceptions.BadRequest, GroupPolicyException):
    """Base for bad-request policy driver exceptions returned to user."""
    pass
class EndpointRequiresEndpointGroup(GroupPolicyBadRequest):
    """Raised when an endpoint is created without an endpoint group."""
    message = _("An endpoint group was not specified when creating endpoint.")
class EndpointEndpointGroupUpdateNotSupported(GroupPolicyBadRequest):
    """Raised on attempts to change an endpoint's endpoint group."""
    message = _("Updating endpoint's endpoint group is not supported.")
class EndpointGroupSubnetRemovalNotSupported(GroupPolicyBadRequest):
    """Raised on attempts to remove a subnet from an endpoint group."""
    message = _("Removing a subnet from an endpoint group is not supported.")
class L3PolicyMultipleRoutersNotSupported(GroupPolicyBadRequest):
    """Raised when an L3 policy specifies more than one router."""
    message = _("L3 policy does not support multiple routers.")
class L3PolicyRoutersUpdateNotSupported(GroupPolicyBadRequest):
    """Raised on attempts to change an L3 policy's routers."""
    message = _("Updating L3 policy's routers is not supported.")
class NoSubnetAvailable(exceptions.ResourceExhausted, GroupPolicyException):
    """Raised when no unused subnet can be carved from the L3 policy pool."""
    message = _("No subnet is available from l3 policy's pool.")

View File

@@ -0,0 +1,495 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.config import cfg
import sqlalchemy as sa
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron.common import log
from neutron.db import model_base
from neutron import manager
from neutron.notifiers import nova
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as pconst
from gbp.neutron.services.grouppolicy.common import exceptions as exc
from gbp.neutron.services.grouppolicy import group_policy_driver_api as api
LOG = logging.getLogger(__name__)
class OwnedPort(model_base.BASEV2):
    """A Port owned by the resource_mapping driver."""
    __tablename__ = 'gpm_owned_ports'
    # CASCADE ensures the ownership marker disappears with the port.
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete='CASCADE'),
                        nullable=False, primary_key=True)
class OwnedSubnet(model_base.BASEV2):
    """A Subnet owned by the resource_mapping driver."""
    __tablename__ = 'gpm_owned_subnets'
    # CASCADE ensures the ownership marker disappears with the subnet.
    subnet_id = sa.Column(sa.String(36),
                          sa.ForeignKey('subnets.id', ondelete='CASCADE'),
                          nullable=False, primary_key=True)
class OwnedNetwork(model_base.BASEV2):
    """A Network owned by the resource_mapping driver."""
    __tablename__ = 'gpm_owned_networks'
    # CASCADE ensures the ownership marker disappears with the network.
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete='CASCADE'),
                           nullable=False, primary_key=True)
class OwnedRouter(model_base.BASEV2):
    """A Router owned by the resource_mapping driver."""
    __tablename__ = 'gpm_owned_routers'
    # CASCADE ensures the ownership marker disappears with the router.
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey('routers.id', ondelete='CASCADE'),
                          nullable=False, primary_key=True)
class ResourceMappingDriver(api.PolicyDriver):
    """Resource Mapping driver for Group Policy plugin.

    This driver implements group policy semantics by mapping group
    policy resources to various other neutron resources:
    endpoint -> port, endpoint_group -> subnet(s), l2_policy -> network,
    l3_policy -> router.  Resources the driver creates implicitly are
    recorded in the Owned* tables so only those are cleaned up on delete.
    """

    @log.log
    def initialize(self):
        # The DHCP agent notifier is resolved lazily (see
        # _dhcp_agent_notifier property); only the cache slot is set here.
        self._cached_agent_notifier = None
        self._nova_notifier = nova.Notifier()

    @log.log
    def create_endpoint_precommit(self, context):
        # An endpoint must belong to an endpoint group.
        if not context.current['endpoint_group_id']:
            raise exc.EndpointRequiresEndpointGroup()

    @log.log
    def create_endpoint_postcommit(self, context):
        # TODO(rkukura): Validate explicit port belongs to subnet of
        # EPG.
        if not context.current['port_id']:
            self._use_implicit_port(context)

    @log.log
    def update_endpoint_precommit(self, context):
        # Re-homing an endpoint to another EPG is not supported.
        if (context.current['endpoint_group_id'] !=
            context.original['endpoint_group_id']):
            raise exc.EndpointEndpointGroupUpdateNotSupported()

    @log.log
    def update_endpoint_postcommit(self, context):
        pass

    @log.log
    def delete_endpoint_precommit(self, context):
        pass

    @log.log
    def delete_endpoint_postcommit(self, context):
        # Only deletes the port if this driver created it implicitly.
        port_id = context.current['port_id']
        self._cleanup_port(context, port_id)

    @log.log
    def create_endpoint_group_precommit(self, context):
        pass

    @log.log
    def create_endpoint_group_postcommit(self, context):
        # TODO(rkukura): Validate explicit subnet belongs to L2P's
        # network.
        subnets = context.current['subnets']
        if subnets:
            # Explicit subnets: attach each one to the L3 policy's router.
            l2p_id = context.current['l2_policy_id']
            l2p = context._plugin.get_l2_policy(context._plugin_context,
                                                l2p_id)
            l3p_id = l2p['l3_policy_id']
            l3p = context._plugin.get_l3_policy(context._plugin_context,
                                                l3p_id)
            router_id = l3p['routers'][0]
            for subnet_id in subnets:
                self._use_explicit_subnet(context, subnet_id, router_id)
        else:
            self._use_implicit_subnet(context)

    @log.log
    def update_endpoint_group_precommit(self, context):
        # Subnets may be added to an EPG but never removed.
        if set(context.original['subnets']) - set(context.current['subnets']):
            raise exc.EndpointGroupSubnetRemovalNotSupported()

    @log.log
    def update_endpoint_group_postcommit(self, context):
        pass

    @log.log
    def delete_endpoint_group_precommit(self, context):
        pass

    @log.log
    def delete_endpoint_group_postcommit(self, context):
        # Detach every subnet from the L3 policy's router, deleting
        # driver-owned subnets along the way.
        l2p_id = context.current['l2_policy_id']
        l2p = context._plugin.get_l2_policy(context._plugin_context, l2p_id)
        l3p_id = l2p['l3_policy_id']
        l3p = context._plugin.get_l3_policy(context._plugin_context, l3p_id)
        router_id = l3p['routers'][0]
        for subnet_id in context.current['subnets']:
            self._cleanup_subnet(context, subnet_id, router_id)

    @log.log
    def create_l2_policy_precommit(self, context):
        pass

    @log.log
    def create_l2_policy_postcommit(self, context):
        if not context.current['network_id']:
            self._use_implicit_network(context)

    @log.log
    def update_l2_policy_precommit(self, context):
        pass

    @log.log
    def update_l2_policy_postcommit(self, context):
        pass

    @log.log
    def delete_l2_policy_precommit(self, context):
        pass

    @log.log
    def delete_l2_policy_postcommit(self, context):
        network_id = context.current['network_id']
        self._cleanup_network(context, network_id)

    @log.log
    def create_l3_policy_precommit(self, context):
        # This driver maps an L3 policy to exactly one router.
        if len(context.current['routers']) > 1:
            raise exc.L3PolicyMultipleRoutersNotSupported()

    @log.log
    def create_l3_policy_postcommit(self, context):
        if not context.current['routers']:
            self._use_implicit_router(context)

    @log.log
    def update_l3_policy_precommit(self, context):
        if context.current['routers'] != context.original['routers']:
            raise exc.L3PolicyRoutersUpdateNotSupported()

    @log.log
    def update_l3_policy_postcommit(self, context):
        pass

    @log.log
    def delete_l3_policy_precommit(self, context):
        pass

    @log.log
    def delete_l3_policy_postcommit(self, context):
        for router_id in context.current['routers']:
            self._cleanup_router(context, router_id)

    def _use_implicit_port(self, context):
        """Create a port on the EPG's L2 policy network and bind it.

        The port is recorded as driver-owned so delete_endpoint can
        clean it up later.
        """
        epg_id = context.current['endpoint_group_id']
        epg = context._plugin.get_endpoint_group(context._plugin_context,
                                                 epg_id)
        l2p_id = epg['l2_policy_id']
        l2p = context._plugin.get_l2_policy(context._plugin_context, l2p_id)
        attrs = {'tenant_id': context.current['tenant_id'],
                 'name': 'ep_' + context.current['name'],
                 'network_id': l2p['network_id'],
                 'mac_address': attributes.ATTR_NOT_SPECIFIED,
                 'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
                 'device_id': '',
                 'device_owner': '',
                 'admin_state_up': True}
        port = self._create_port(context, attrs)
        port_id = port['id']
        self._mark_port_owned(context._plugin_context.session, port_id)
        context.set_port_id(port_id)

    def _cleanup_port(self, context, port_id):
        """Delete the port only if this driver created it."""
        if self._port_is_owned(context._plugin_context.session, port_id):
            self._delete_port(context, port_id)

    def _use_implicit_subnet(self, context):
        """Allocate a subnet from the L3 policy's ip_pool for the EPG.

        Tries each candidate CIDR in turn until one is accepted, then
        attaches it to the L3 policy's router and records ownership.
        Raises NoSubnetAvailable when the pool is exhausted.
        """
        # REVISIT(rkukura): This is a temporary allocation algorithm
        # that depends on an exception being raised when the subnet
        # being created is already in use. A DB allocation table for
        # the pool of subnets, or at least a more efficient way to
        # test if a subnet is in-use, may be needed.
        l2p_id = context.current['l2_policy_id']
        l2p = context._plugin.get_l2_policy(context._plugin_context, l2p_id)
        l3p_id = l2p['l3_policy_id']
        l3p = context._plugin.get_l3_policy(context._plugin_context, l3p_id)
        pool = netaddr.IPNetwork(l3p['ip_pool'])
        for cidr in pool.subnet(l3p['subnet_prefix_length']):
            try:
                attrs = {'tenant_id': context.current['tenant_id'],
                         'name': 'epg_' + context.current['name'],
                         'network_id': l2p['network_id'],
                         'ip_version': l3p['ip_version'],
                         'cidr': cidr.__str__(),
                         'enable_dhcp': True,
                         'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
                         'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
                         'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
                         'host_routes': attributes.ATTR_NOT_SPECIFIED}
                subnet = self._create_subnet(context, attrs)
                subnet_id = subnet['id']
                try:
                    router_id = l3p['routers'][0]
                    interface_info = {'subnet_id': subnet_id}
                    self._add_router_interface(context, router_id,
                                               interface_info)
                    self._mark_subnet_owned(context._plugin_context.session,
                                            subnet_id)
                    context.add_subnet(subnet_id)
                    return
                except n_exc.InvalidInput:
                    # This exception is not expected. We catch this
                    # here so that it isn't caught below and handled
                    # as if the CIDR is already in use.
                    LOG.exception(_("adding subnet to router failed"))
                    self._delete_subnet(context, subnet['id'])
                    raise exc.GroupPolicyInternalError()
            except n_exc.BadRequest:
                # This is expected (CIDR overlap) until we have a
                # proper subnet allocation algorithm. We ignore the
                # exception and repeat with the next CIDR.
                pass
        raise exc.NoSubnetAvailable()

    def _use_explicit_subnet(self, context, subnet_id, router_id):
        """Attach a caller-supplied subnet to the L3 policy's router."""
        interface_info = {'subnet_id': subnet_id}
        self._add_router_interface(context, router_id, interface_info)

    def _cleanup_subnet(self, context, subnet_id, router_id):
        """Detach the subnet from the router; delete it if driver-owned."""
        interface_info = {'subnet_id': subnet_id}
        self._remove_router_interface(context, router_id, interface_info)
        if self._subnet_is_owned(context._plugin_context.session, subnet_id):
            self._delete_subnet(context, subnet_id)

    def _use_implicit_network(self, context):
        """Create a network for the L2 policy and record ownership."""
        attrs = {'tenant_id': context.current['tenant_id'],
                 'name': 'l2p_' + context.current['name'],
                 'admin_state_up': True,
                 'shared': False}
        network = self._create_network(context, attrs)
        network_id = network['id']
        self._mark_network_owned(context._plugin_context.session, network_id)
        context.set_network_id(network_id)

    def _cleanup_network(self, context, network_id):
        """Delete the network only if this driver created it."""
        if self._network_is_owned(context._plugin_context.session, network_id):
            self._delete_network(context, network_id)

    def _use_implicit_router(self, context):
        """Create a router for the L3 policy and record ownership."""
        attrs = {'tenant_id': context.current['tenant_id'],
                 'name': 'l3p_' + context.current['name'],
                 'external_gateway_info': None,
                 'admin_state_up': True}
        router = self._create_router(context, attrs)
        router_id = router['id']
        self._mark_router_owned(context._plugin_context.session, router_id)
        context.add_router(router_id)

    def _cleanup_router(self, context, router_id):
        """Delete the router only if this driver created it."""
        if self._router_is_owned(context._plugin_context.session, router_id):
            self._delete_router(context, router_id)

    # The following methods perform the necessary subset of
    # functionality from neutron.api.v2.base.Controller.
    #
    # REVISIT(rkukura): Can we just use the WSGI Controller?  Using
    # neutronclient is also a possibility, but presents significant
    # issues to unit testing as well as overhead and failure modes.

    def _create_port(self, context, attrs):
        return self._create_resource(self._core_plugin,
                                     context._plugin_context,
                                     'port', attrs)

    def _delete_port(self, context, port_id):
        self._delete_resource(self._core_plugin,
                              context._plugin_context,
                              'port', port_id)

    def _create_subnet(self, context, attrs):
        return self._create_resource(self._core_plugin,
                                     context._plugin_context,
                                     'subnet', attrs)

    def _delete_subnet(self, context, subnet_id):
        self._delete_resource(self._core_plugin,
                              context._plugin_context,
                              'subnet', subnet_id)

    def _create_network(self, context, attrs):
        return self._create_resource(self._core_plugin,
                                     context._plugin_context,
                                     'network', attrs)

    def _delete_network(self, context, network_id):
        self._delete_resource(self._core_plugin,
                              context._plugin_context,
                              'network', network_id)

    def _create_router(self, context, attrs):
        return self._create_resource(self._l3_plugin,
                                     context._plugin_context,
                                     'router', attrs)

    def _add_router_interface(self, context, router_id, interface_info):
        self._l3_plugin.add_router_interface(context._plugin_context,
                                             router_id, interface_info)

    def _remove_router_interface(self, context, router_id, interface_info):
        self._l3_plugin.remove_router_interface(context._plugin_context,
                                                router_id, interface_info)

    def _delete_router(self, context, router_id):
        self._delete_resource(self._l3_plugin,
                              context._plugin_context,
                              'router', router_id)

    def _create_resource(self, plugin, context, resource, attrs):
        """Create a neutron resource via its plugin and emit notifications."""
        # REVISIT(rkukura): Do create.start notification?
        # REVISIT(rkukura): Check authorization?
        # REVISIT(rkukura): Do quota?
        action = 'create_' + resource
        obj_creator = getattr(plugin, action)
        obj = obj_creator(context, {resource: attrs})
        self._nova_notifier.send_network_change(action, {}, {resource: obj})
        # REVISIT(rkukura): Do create.end notification?
        if cfg.CONF.dhcp_agent_notification:
            self._dhcp_agent_notifier.notify(context,
                                             {resource: obj},
                                             resource + '.create.end')
        return obj

    def _update_resource(self, plugin, context, resource, resource_id, attrs):
        """Update a neutron resource via its plugin and emit notifications."""
        # REVISIT(rkukura): Do update.start notification?
        # REVISIT(rkukura): Check authorization?
        obj_getter = getattr(plugin, 'get_' + resource)
        orig_obj = obj_getter(context, resource_id)
        action = 'update_' + resource
        obj_updater = getattr(plugin, action)
        obj = obj_updater(context, resource_id, {resource: attrs})
        self._nova_notifier.send_network_change(action, orig_obj,
                                                {resource: obj})
        # REVISIT(rkukura): Do update.end notification?
        if cfg.CONF.dhcp_agent_notification:
            self._dhcp_agent_notifier.notify(context,
                                             {resource: obj},
                                             resource + '.update.end')
        return obj

    def _delete_resource(self, plugin, context, resource, resource_id):
        """Delete a neutron resource via its plugin and emit notifications."""
        # REVISIT(rkukura): Do delete.start notification?
        # REVISIT(rkukura): Check authorization?
        obj_getter = getattr(plugin, 'get_' + resource)
        obj = obj_getter(context, resource_id)
        action = 'delete_' + resource
        obj_deleter = getattr(plugin, action)
        obj_deleter(context, resource_id)
        self._nova_notifier.send_network_change(action, {}, {resource: obj})
        # REVISIT(rkukura): Do delete.end notification?
        if cfg.CONF.dhcp_agent_notification:
            self._dhcp_agent_notifier.notify(context,
                                             {resource: obj},
                                             resource + '.delete.end')

    @property
    def _core_plugin(self):
        # REVISIT(rkukura): Need initialization method after all
        # plugins are loaded to grab and store plugin.
        return manager.NeutronManager.get_plugin()

    @property
    def _l3_plugin(self):
        # REVISIT(rkukura): Need initialization method after all
        # plugins are loaded to grab and store plugin.
        plugins = manager.NeutronManager.get_service_plugins()
        l3_plugin = plugins.get(pconst.L3_ROUTER_NAT)
        if not l3_plugin:
            LOG.error(_("No L3 router service plugin found."))
            raise exc.GroupPolicyDeploymentError()
        return l3_plugin

    @property
    def _dhcp_agent_notifier(self):
        # REVISIT(rkukura): Need initialization method after all
        # plugins are loaded to grab and store notifier.
        if not self._cached_agent_notifier:
            agent_notifiers = getattr(self._core_plugin, 'agent_notifiers', {})
            self._cached_agent_notifier = (
                agent_notifiers.get(const.AGENT_TYPE_DHCP) or
                dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
        return self._cached_agent_notifier

    # Ownership bookkeeping: one row per implicitly-created resource,
    # queried before deletion so explicit (user-supplied) resources
    # are never removed by this driver.

    def _mark_port_owned(self, session, port_id):
        with session.begin(subtransactions=True):
            owned = OwnedPort(port_id=port_id)
            session.add(owned)

    def _port_is_owned(self, session, port_id):
        with session.begin(subtransactions=True):
            return (session.query(OwnedPort).
                    filter_by(port_id=port_id).
                    first() is not None)

    def _mark_subnet_owned(self, session, subnet_id):
        with session.begin(subtransactions=True):
            owned = OwnedSubnet(subnet_id=subnet_id)
            session.add(owned)

    def _subnet_is_owned(self, session, subnet_id):
        with session.begin(subtransactions=True):
            return (session.query(OwnedSubnet).
                    filter_by(subnet_id=subnet_id).
                    first() is not None)

    def _mark_network_owned(self, session, network_id):
        with session.begin(subtransactions=True):
            owned = OwnedNetwork(network_id=network_id)
            session.add(owned)

    def _network_is_owned(self, session, network_id):
        with session.begin(subtransactions=True):
            return (session.query(OwnedNetwork).
                    filter_by(network_id=network_id).
                    first() is not None)

    def _mark_router_owned(self, session, router_id):
        with session.begin(subtransactions=True):
            owned = OwnedRouter(router_id=router_id)
            session.add(owned)

    def _router_is_owned(self, session, router_id):
        with session.begin(subtransactions=True):
            return (session.query(OwnedRouter).
                    filter_by(router_id=router_id).
                    first() is not None)

View File

@@ -36,6 +36,11 @@ class EndpointContext(GroupPolicyContext, api.EndpointContext):
def original(self):
return self._original_endpoint
def set_port_id(self, port_id):
    """Map this endpoint to a neutron port, in the DB and the cached dict."""
    ep_id = self._endpoint['id']
    self._plugin._set_port_for_endpoint(self._plugin_context, ep_id, port_id)
    self._endpoint['port_id'] = port_id
class EndpointGroupContext(GroupPolicyContext, api.EndpointGroupContext):
@@ -58,6 +63,11 @@ class EndpointGroupContext(GroupPolicyContext, api.EndpointGroupContext):
self._plugin_context, self._endpoint_group['id'], l2_policy_id)
self._endpoint_group['l2_policy_id'] = l2_policy_id
def add_subnet(self, subnet_id):
    """Add a neutron subnet to this endpoint group's mapping."""
    epg_id = self._endpoint_group['id']
    updated = self._plugin._add_subnet_to_endpoint_group(
        self._plugin_context, epg_id, subnet_id)
    self._endpoint_group['subnets'] = updated
class L2PolicyContext(GroupPolicyContext, api.L2PolicyContext):
@@ -80,6 +90,11 @@ class L2PolicyContext(GroupPolicyContext, api.L2PolicyContext):
self._plugin_context, self._l2_policy['id'], l3_policy_id)
self._l2_policy['l3_policy_id'] = l3_policy_id
def set_network_id(self, network_id):
    """Map this l2_policy to a neutron network."""
    l2p_id = self._l2_policy['id']
    self._plugin._set_network_for_l2_policy(self._plugin_context, l2p_id,
                                            network_id)
    self._l2_policy['network_id'] = network_id
class L3PolicyContext(GroupPolicyContext, api.L3PolicyContext):
@@ -96,3 +111,8 @@ class L3PolicyContext(GroupPolicyContext, api.L3PolicyContext):
@property
def original(self):
return self._original_l3_policy
def add_router(self, router_id):
    """Add a neutron router to this l3_policy's mapping."""
    l3p_id = self._l3_policy['id']
    updated = self._plugin._add_router_to_l3_policy(
        self._plugin_context, l3p_id, router_id)
    self._l3_policy['routers'] = updated

View File

@@ -43,6 +43,16 @@ class EndpointContext(object):
"""
pass
@abc.abstractmethod
def set_port_id(self, port_id):
    """Set the port for the endpoint.

    :param port_id: Port to which endpoint is mapped.

    Set the neutron port to which the endpoint is mapped.
    """
    pass
@six.add_metaclass(abc.ABCMeta)
class EndpointGroupContext(object):
@@ -82,6 +92,17 @@ class EndpointGroupContext(object):
"""
pass
@abc.abstractmethod
def add_subnet(self, subnet_id):
    """Add the subnet to the endpoint_group.

    :param subnet_id: Subnet to which endpoint_group is mapped.

    Add a neutron subnet to the set of subnets to which the
    endpoint_group is mapped.
    """
    pass
@six.add_metaclass(abc.ABCMeta)
class L2PolicyContext(object):
@@ -121,6 +142,16 @@ class L2PolicyContext(object):
"""
pass
@abc.abstractmethod
def set_network_id(self, network_id):
    """Set the network for the l2_policy.

    :param network_id: Network to which l2_policy is mapped.

    Set the neutron network to which the l2_policy is mapped.
    """
    pass
@six.add_metaclass(abc.ABCMeta)
class L3PolicyContext(object):
@@ -151,6 +182,17 @@ class L3PolicyContext(object):
"""
pass
@abc.abstractmethod
def add_router(self, router_id):
    """Add the router to the l3_policy.

    :param router_id: Router to which l3_policy is mapped.

    Add a neutron router to the set of routers to which the
    l3_policy is mapped.
    """
    pass
@six.add_metaclass(abc.ABCMeta)
class PolicyDriver(object):

View File

@@ -111,7 +111,11 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager):
for driver in self.ordered_policy_drivers:
try:
getattr(driver.obj, method_name)(context)
except gp_exc.GroupPolicyException:
# This is an exception for the user.
raise
except Exception:
# This is an internal failure.
LOG.exception(
_("Policy driver '%(name)s' failed in %(method)s"),
{'name': driver.name, 'method': method_name}

View File

@@ -194,7 +194,8 @@ DB_GP_PLUGIN_KLASS = (GroupPolicyDBTestPlugin.__module__ + '.' +
class GroupPolicyDbTestCase(GroupPolicyDBTestBase,
test_db_plugin.NeutronDbPluginV2TestCase):
def setUp(self, gp_plugin=None, service_plugins=None, ext_mgr=None):
def setUp(self, core_plugin=None, gp_plugin=None, service_plugins=None,
ext_mgr=None):
extensions.append_api_extensions_path(gbp.neutron.extensions.__path__)
if not gp_plugin:
gp_plugin = DB_GP_PLUGIN_KLASS
@@ -203,7 +204,7 @@ class GroupPolicyDbTestCase(GroupPolicyDBTestBase,
service_plugins = {'gp_plugin_name': gp_plugin}
super(GroupPolicyDbTestCase, self).setUp(
ext_mgr=ext_mgr,
plugin=core_plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins
)

View File

@@ -32,14 +32,16 @@ DB_GP_PLUGIN_KLASS = (GroupPolicyMappingDBTestPlugin.__module__ + '.' +
class GroupPolicyMappingDbTestCase(tgpdb.GroupPolicyDbTestCase,
test_l3_plugin.L3NatTestCaseMixin):
def setUp(self, gp_plugin=None, service_plugins=None):
def setUp(self, core_plugin=None, gp_plugin=None, service_plugins=None):
if not gp_plugin:
gp_plugin = DB_GP_PLUGIN_KLASS
if not service_plugins:
service_plugins = {'l3_plugin_name': "router",
'gp_plugin_name': gp_plugin}
super(GroupPolicyMappingDbTestCase, self).setUp(
gp_plugin=gp_plugin, service_plugins=service_plugins)
core_plugin=core_plugin, gp_plugin=gp_plugin,
service_plugins=service_plugins
)
def _get_test_endpoint_attrs(self, name='ep1', description='test ep',
endpoint_group_id=None, port_id=None):

View File

@@ -28,10 +28,11 @@ GP_PLUGIN_KLASS = (
class GroupPolicyPluginTestCase(tgpmdb.GroupPolicyMappingDbTestCase):
def setUp(self, core_plugin=None, gp_plugin=None, ext_mgr=None):
super(GroupPolicyPluginTestCase, self).setUp(
gp_plugin=GP_PLUGIN_KLASS
)
def setUp(self, core_plugin=None, gp_plugin=None):
if not gp_plugin:
gp_plugin = GP_PLUGIN_KLASS
super(GroupPolicyPluginTestCase, self).setUp(core_plugin=core_plugin,
gp_plugin=gp_plugin)
class TestGroupPolicyPluginGroupResources(

View File

@@ -0,0 +1,389 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import webob.exc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.notifiers import nova
from gbp.neutron.services.grouppolicy import config
from gbp.neutron.tests.unit.services.grouppolicy import test_grouppolicy_plugin
CORE_PLUGIN = 'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin'
class ResourceMappingTestCase(
        test_grouppolicy_plugin.GroupPolicyPluginTestCase):
    """Base test case enabling the resource_mapping policy driver."""

    def setUp(self):
        # Enable both drivers: implicit_policy supplies default policy
        # resources, resource_mapping renders them onto neutron objects.
        config.cfg.CONF.set_override('policy_drivers',
                                     ['implicit_policy', 'resource_mapping'],
                                     group='group_policy')
        super(ResourceMappingTestCase, self).setUp(core_plugin=CORE_PLUGIN)
class TestEndpoint(ResourceMappingTestCase):
    """Tests for the endpoint -> port mapping."""

    def test_implicit_port_lifecycle(self):
        """An endpoint without a port gets one created and later cleaned up."""
        # Create endpoint group.
        epg = self.create_endpoint_group(name="epg1")
        epg_id = epg['endpoint_group']['id']

        # Create endpoint with implicit port.
        ep = self.create_endpoint(name="ep1", endpoint_group_id=epg_id)
        ep_id = ep['endpoint']['id']
        port_id = ep['endpoint']['port_id']
        self.assertIsNotNone(port_id)

        # TODO(rkukura): Verify implicit port belongs to endpoint
        # group's subnet.

        # Verify deleting endpoint cleans up port.
        req = self.new_delete_request('endpoints', ep_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
        req = self.new_show_request('ports', port_id, fmt=self.fmt)
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)

    def test_explicit_port_lifecycle(self):
        """A caller-supplied port is used as-is and survives endpoint delete."""
        # Create endpoint group.
        epg = self.create_endpoint_group(name="epg1")
        epg_id = epg['endpoint_group']['id']
        subnet_id = epg['endpoint_group']['subnets'][0]
        req = self.new_show_request('subnets', subnet_id)
        subnet = self.deserialize(self.fmt, req.get_response(self.api))

        # Create endpoint with explicit port.
        with self.port(subnet=subnet) as port:
            port_id = port['port']['id']
            ep = self.create_endpoint(name="ep1", endpoint_group_id=epg_id,
                                      port_id=port_id)
            ep_id = ep['endpoint']['id']
            self.assertEqual(port_id, ep['endpoint']['port_id'])

            # Verify deleting endpoint does not cleanup port.
            req = self.new_delete_request('endpoints', ep_id)
            res = req.get_response(self.ext_api)
            self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
            req = self.new_show_request('ports', port_id, fmt=self.fmt)
            res = req.get_response(self.api)
            self.assertEqual(res.status_int, webob.exc.HTTPOk.code)

    def test_missing_epg_rejected(self):
        """Creating an endpoint without an EPG returns a 400 error."""
        data = self.create_endpoint(name="ep1",
                                    expected_res_status=
                                    webob.exc.HTTPBadRequest.code)
        self.assertEqual('EndpointRequiresEndpointGroup',
                         data['NeutronError']['type'])

    def test_epg_update_rejected(self):
        """Moving an endpoint between EPGs is rejected by the driver."""
        # Create two endpoint groups.
        epg1 = self.create_endpoint_group(name="epg1")
        epg1_id = epg1['endpoint_group']['id']
        epg2 = self.create_endpoint_group(name="epg2")
        epg2_id = epg2['endpoint_group']['id']

        # Create endpoint.
        ep = self.create_endpoint(name="ep1", endpoint_group_id=epg1_id)
        ep_id = ep['endpoint']['id']

        # Verify updating endpoint group rejected.
        data = {'endpoint': {'endpoint_group_id': epg2_id}}
        req = self.new_update_request('endpoints', data, ep_id)
        data = self.deserialize(self.fmt, req.get_response(self.ext_api))
        self.assertEqual('EndpointEndpointGroupUpdateNotSupported',
                         data['NeutronError']['type'])
class TestEndpointGroup(ResourceMappingTestCase):
def test_implicit_subnet_lifecycle(self):
    """An EPG without subnets gets one allocated and later cleaned up."""
    # Use explicit L2 policy so network and subnet not deleted
    # with endpoint group.
    l2p = self.create_l2_policy()
    l2p_id = l2p['l2_policy']['id']

    # Create endpoint group with implicit subnet.
    epg = self.create_endpoint_group(name="epg1", l2_policy_id=l2p_id)
    epg_id = epg['endpoint_group']['id']
    subnets = epg['endpoint_group']['subnets']
    self.assertIsNotNone(subnets)
    self.assertEqual(len(subnets), 1)
    subnet_id = subnets[0]

    # TODO(rkukura): Verify implicit subnet belongs to L2 policy's
    # network, is within L3 policy's ip_pool, and was added as
    # router interface.

    # Verify deleting endpoint group cleans up subnet.
    req = self.new_delete_request('endpoint_groups', epg_id)
    res = req.get_response(self.ext_api)
    self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
    req = self.new_show_request('subnets', subnet_id, fmt=self.fmt)
    res = req.get_response(self.api)
    self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)

    # TODO(rkukura): Verify implicit subnet was removed as router
    # interface.
def test_explicit_subnet_lifecycle(self):
# Create L3 policy.
l3p = self.create_l3_policy(name="l3p1", ip_pool='10.0.0.0/8')
l3p_id = l3p['l3_policy']['id']
# Create L2 policy.
l2p = self.create_l2_policy(name="l2p1", l3_policy_id=l3p_id)
l2p_id = l2p['l2_policy']['id']
network_id = l2p['l2_policy']['network_id']
req = self.new_show_request('networks', network_id)
network = self.deserialize(self.fmt, req.get_response(self.api))
# Create endpoint group with explicit subnet.
with self.subnet(network=network, cidr='10.10.1.0/24') as subnet:
subnet_id = subnet['subnet']['id']
epg = self.create_endpoint_group(name="epg1", l2_policy_id=l2p_id,
subnets=[subnet_id])
epg_id = epg['endpoint_group']['id']
subnets = epg['endpoint_group']['subnets']
self.assertIsNotNone(subnets)
self.assertEqual(len(subnets), 1)
self.assertEqual(subnet_id, subnets[0])
# TODO(rkukura): Verify explicit subnet was added as
# router interface.
# Verify deleting endpoint group does not cleanup subnet.
req = self.new_delete_request('endpoint_groups', epg_id)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
req = self.new_show_request('subnets', subnet_id, fmt=self.fmt)
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
# TODO(rkukura): Verify explicit subnet was removed as
# router interface.
def test_add_subnet(self):
# Create L3 policy.
l3p = self.create_l3_policy(name="l3p1", ip_pool='10.0.0.0/8')
l3p_id = l3p['l3_policy']['id']
# Create L2 policy.
l2p = self.create_l2_policy(name="l2p1", l3_policy_id=l3p_id)
l2p_id = l2p['l2_policy']['id']
network_id = l2p['l2_policy']['network_id']
req = self.new_show_request('networks', network_id)
network = self.deserialize(self.fmt, req.get_response(self.api))
# Create endpoint group with explicit subnet.
with contextlib.nested(
self.subnet(network=network, cidr='10.10.1.0/24'),
self.subnet(network=network, cidr='10.10.2.0/24')
) as (subnet1, subnet2):
subnet1_id = subnet1['subnet']['id']
subnet2_id = subnet2['subnet']['id']
subnets = [subnet1_id]
epg = self.create_endpoint_group(l2_policy_id=l2p_id,
subnets=subnets)
epg_id = epg['endpoint_group']['id']
# Add subnet.
subnets = [subnet1_id, subnet2_id]
data = {'endpoint_group': {'subnets': subnets}}
req = self.new_update_request('endpoint_groups', data, epg_id)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
def test_remove_subnet_rejected(self):
# Create L3 policy.
l3p = self.create_l3_policy(name="l3p1", ip_pool='10.0.0.0/8')
l3p_id = l3p['l3_policy']['id']
# Create L2 policy.
l2p = self.create_l2_policy(name="l2p1", l3_policy_id=l3p_id)
l2p_id = l2p['l2_policy']['id']
network_id = l2p['l2_policy']['network_id']
req = self.new_show_request('networks', network_id)
network = self.deserialize(self.fmt, req.get_response(self.api))
# Create endpoint group with explicit subnets.
with contextlib.nested(
self.subnet(network=network, cidr='10.10.1.0/24'),
self.subnet(network=network, cidr='10.10.2.0/24')
) as (subnet1, subnet2):
subnet1_id = subnet1['subnet']['id']
subnet2_id = subnet2['subnet']['id']
subnets = [subnet1_id, subnet2_id]
epg = self.create_endpoint_group(l2_policy_id=l2p_id,
subnets=subnets)
epg_id = epg['endpoint_group']['id']
# Verify removing subnet rejected.
data = {'endpoint_group': {'subnets': [subnet2_id]}}
req = self.new_update_request('endpoint_groups', data, epg_id)
data = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual('EndpointGroupSubnetRemovalNotSupported',
data['NeutronError']['type'])
def test_subnet_allocation(self):
epg1 = self.create_endpoint_group(name="epg1")
subnets = epg1['endpoint_group']['subnets']
req = self.new_show_request('subnets', subnets[0], fmt=self.fmt)
subnet1 = self.deserialize(self.fmt, req.get_response(self.api))
epg2 = self.create_endpoint_group(name="epg2")
subnets = epg2['endpoint_group']['subnets']
req = self.new_show_request('subnets', subnets[0], fmt=self.fmt)
subnet2 = self.deserialize(self.fmt, req.get_response(self.api))
self.assertNotEqual(subnet1['subnet']['cidr'],
subnet2['subnet']['cidr'])
# TODO(rkukura): Test ip_pool exhaustion.
class TestL2Policy(ResourceMappingTestCase):
    """Tests mapping of L2 policies to neutron networks."""

    def test_implicit_network_lifecycle(self):
        # An L2 policy created without a network gets an implicit one.
        l2p = self.create_l2_policy(name="l2p1")
        l2p_id = l2p['l2_policy']['id']
        network_id = l2p['l2_policy']['network_id']
        self.assertIsNotNone(network_id)
        # The implicit network must be cleaned up with the L2 policy.
        req = self.new_delete_request('l2_policies', l2p_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        req = self.new_show_request('networks', network_id, fmt=self.fmt)
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_explicit_network_lifecycle(self):
        # An explicitly supplied network is used as-is and must not be
        # deleted along with the L2 policy.
        with self.network() as network:
            network_id = network['network']['id']
            l2p = self.create_l2_policy(name="l2p1", network_id=network_id)
            self.assertEqual(network_id, l2p['l2_policy']['network_id'])
            req = self.new_delete_request('l2_policies',
                                          l2p['l2_policy']['id'])
            res = req.get_response(self.ext_api)
            self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
            req = self.new_show_request('networks', network_id,
                                        fmt=self.fmt)
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
class TestL3Policy(ResourceMappingTestCase):
    """Tests mapping of L3 policies to neutron routers."""

    def test_implicit_router_lifecycle(self):
        # An L3 policy created without routers gets exactly one
        # implicit router.
        l3p = self.create_l3_policy(name="l3p1")
        l3p_id = l3p['l3_policy']['id']
        routers = l3p['l3_policy']['routers']
        self.assertIsNotNone(routers)
        self.assertEqual(1, len(routers))
        router_id = routers[0]
        # The implicit router must be cleaned up with the L3 policy.
        req = self.new_delete_request('l3_policies', l3p_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        req = self.new_show_request('routers', router_id, fmt=self.fmt)
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_explicit_router_lifecycle(self):
        # An explicitly supplied router is used as-is and must not be
        # deleted along with the L3 policy.
        with self.router() as router:
            router_id = router['router']['id']
            l3p = self.create_l3_policy(name="l3p1", routers=[router_id])
            routers = l3p['l3_policy']['routers']
            self.assertIsNotNone(routers)
            self.assertEqual(1, len(routers))
            self.assertEqual(router_id, routers[0])
            req = self.new_delete_request('l3_policies',
                                          l3p['l3_policy']['id'])
            res = req.get_response(self.ext_api)
            self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
            req = self.new_show_request('routers', router_id,
                                        fmt=self.fmt)
            res = req.get_response(self.ext_api)
            self.assertEqual(webob.exc.HTTPOk.code, res.status_int)

    def test_multiple_routers_rejected(self):
        # Creating an L3 policy with more than one router is rejected.
        with contextlib.nested(self.router(),
                               self.router()) as (router1, router2):
            res = self.create_l3_policy(
                name="l3p1",
                routers=[router1['router']['id'],
                         router2['router']['id']],
                expected_res_status=webob.exc.HTTPBadRequest.code)
            self.assertEqual('L3PolicyMultipleRoutersNotSupported',
                             res['NeutronError']['type'])

    def test_router_update_rejected(self):
        # Replacing an L3 policy's routers via update is rejected.
        l3p_id = self.create_l3_policy(name="l3p1")['l3_policy']['id']
        with self.router() as router:
            data = {'l3_policy': {'routers': [router['router']['id']]}}
            req = self.new_update_request('l3_policies', data, l3p_id)
            res = self.deserialize(self.fmt,
                                   req.get_response(self.ext_api))
            self.assertEqual('L3PolicyRoutersUpdateNotSupported',
                             res['NeutronError']['type'])
class NotificationTest(ResourceMappingTestCase):
    """Tests that mapped-resource creation emits the usual notifications."""

    def _create_ep_in_new_epg(self):
        # Create an endpoint group plus an endpoint inside it, which
        # drives implicit router/network/subnet/port creation, and check
        # the endpoint landed in the expected group.
        epg = self.create_endpoint_group(name="epg1")
        epg_id = epg['endpoint_group']['id']
        ep = self.create_endpoint(name="ep1", endpoint_group_id=epg_id)
        self.assertEqual(ep['endpoint']['endpoint_group_id'], epg_id)

    def test_dhcp_notifier(self):
        with mock.patch.object(dhcp_rpc_agent_api.DhcpAgentNotifyAPI,
                               'notify') as dhcp_notifier:
            self._create_ep_in_new_epg()
            # REVISIT(rkukura): Check dictionaries for correct id, etc..
            for event in ("router.create.end", "network.create.end",
                          "subnet.create.end", "port.create.end"):
                dhcp_notifier.assert_any_call(mock.ANY, mock.ANY, event)

    def test_nova_notifier(self):
        with mock.patch.object(nova.Notifier,
                               'send_network_change') as nova_notifier:
            self._create_ep_in_new_epg()
            # REVISIT(rkukura): Check dictionaries for correct id, etc..
            for action in ("create_router", "create_network",
                           "create_subnet", "create_port"):
                nova_notifier.assert_any_call(action, {}, mock.ANY)

View File

@@ -31,6 +31,7 @@ neutron.service_plugins =
gbp.neutron.group_policy.policy_drivers =
dummy = gbp.neutron.services.grouppolicy.drivers.dummy_driver:NoopDriver
implicit_policy = gbp.neutron.services.grouppolicy.drivers.implicit_policy:ImplicitPolicyDriver
resource_mapping = gbp.neutron.services.grouppolicy.drivers.resource_mapping:ResourceMappingDriver
[build_sphinx]
source-dir = doc/source