Remove_legacy_service_chain_code(2)

Change-Id: I27b9839b41408e94333e4aa5d5e14c6dd45c1643
This commit is contained in:
pulkitvajpayee07 2022-06-13 14:01:40 +05:30 committed by pulkit vajpayee
parent 549f0f3688
commit 1e44b3991f
38 changed files with 56 additions and 7525 deletions

View File

@@ -23,7 +23,6 @@ from oslo_log import log as logging
from oslo_utils import excutils
from gbpservice.neutron.extensions import group_policy as gp_ext
from gbpservice.neutron.extensions import servicechain as sc_ext
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
LOG = logging.getLogger(__name__)
@@ -73,16 +72,6 @@ class LocalAPI(object):
raise exc.GroupPolicyDeploymentError()
return group_policy_plugin
@property
def _servicechain_plugin(self):
# REVISIT(rkukura): Need initialization method after all
# plugins are loaded to grab and store plugin.
servicechain_plugin = directory.get_plugin(pconst.SERVICECHAIN)
if not servicechain_plugin:
LOG.error("No Servicechain service plugin found.")
raise exc.GroupPolicyDeploymentError()
return servicechain_plugin
@property
def _trunk_plugin(self):
# REVISIT(rkukura): Need initialization method after all
@@ -94,7 +83,7 @@ class LocalAPI(object):
# REVISIT(rkukura): Do create.start notification?
# REVISIT(rkukura): Check authorization?
reservation = None
if plugin in [self._group_policy_plugin, self._servicechain_plugin]:
if plugin in [self._group_policy_plugin]:
reservation = quota.QUOTAS.make_reservation(
context, context.tenant_id, {resource: 1}, plugin)
action = 'create_' + resource
@@ -578,55 +567,6 @@ class LocalAPI(object):
except gp_ext.PolicyRuleSetNotFound:
LOG.warning('Policy Rule Set %s already deleted', prs_id)
def _get_servicechain_instance(self, plugin_context, sci_id):
return self._get_resource(self._servicechain_plugin, plugin_context,
'servicechain_instance', sci_id)
def _get_servicechain_instances(self, plugin_context, filters=None):
filters = filters or {}
return self._get_resources(self._servicechain_plugin, plugin_context,
'servicechain_instances', filters)
def _create_servicechain_instance(self, plugin_context, attrs):
return self._create_resource(self._servicechain_plugin, plugin_context,
'servicechain_instance', attrs, False)
def _update_servicechain_instance(self, plugin_context, sci_id, attrs):
return self._update_resource(self._servicechain_plugin, plugin_context,
'servicechain_instance', sci_id, attrs,
False)
def _delete_servicechain_instance(self, plugin_context, sci_id):
try:
self._delete_resource(self._servicechain_plugin, plugin_context,
'servicechain_instance', sci_id, False)
except sc_ext.ServiceChainInstanceNotFound:
LOG.warning("servicechain %s already deleted", sci_id)
def _get_servicechain_spec(self, plugin_context, scs_id):
return self._get_resource(self._servicechain_plugin, plugin_context,
'servicechain_spec', scs_id)
def _get_servicechain_specs(self, plugin_context, filters=None):
filters = filters or {}
return self._get_resources(self._servicechain_plugin, plugin_context,
'servicechain_specs', filters)
def _create_servicechain_spec(self, plugin_context, attrs):
return self._create_resource(self._servicechain_plugin, plugin_context,
'servicechain_spec', attrs, False)
def _update_servicechain_spec(self, plugin_context, scs_id, attrs):
return self._update_resource(self._servicechain_plugin, plugin_context,
'servicechain_spec', scs_id, attrs, False)
def _delete_servicechain_spec(self, plugin_context, scs_id):
try:
self._delete_resource(self._servicechain_plugin, plugin_context,
'servicechain_spec', scs_id)
except sc_ext.ServiceChainSpecNotFound:
LOG.warning("servicechain spec %s already deleted", scs_id)
def _get_policy_target(self, plugin_context, pt_id):
return self._get_resource(self._group_policy_plugin, plugin_context,
'policy_target', pt_id)
@@ -668,10 +608,3 @@ class LocalAPI(object):
return self._update_resource(self._group_policy_plugin, plugin_context,
'policy_target_group', ptg_id, attrs,
False)
def _delete_policy_target_group(self, plugin_context, ptg_id):
try:
self._delete_resource(self._group_policy_plugin, plugin_context,
'policy_target_group', ptg_id)
except sc_ext.ServiceChainSpecNotFound:
LOG.warning("Policy Target Group %s already deleted", ptg_id)

View File

@@ -31,8 +31,7 @@ from gbpservice.neutron.db.grouppolicy import ( # noqa
group_policy_mapping_db
)
from gbpservice.neutron.db import ( # noqa
implicitsubnetpool_db,
servicechain_db
implicitsubnetpool_db
)
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import ( # noqa
data_migrations,
@@ -40,18 +39,11 @@ from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import ( # noqa
extension_db
)
from gbpservice.neutron.services.grouppolicy.drivers import ( # noqa
chain_mapping,
implicit_policy,
nsp_manager,
resource_mapping
)
from gbpservice.neutron.services.servicechain.plugins.ncp import ( # noqa
model
)
from gbpservice.neutron.services.servicechain.plugins.ncp.node_drivers import ( # noqa
heat_node_driver,
nfp_node_driver
)
from gbpservice.neutron.tests.unit.plugins.ml2plus.drivers import ( # noqa
extension_test
)

View File

@@ -0,0 +1,43 @@
# Copyright 2022 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
"""remove_legacy_service
Revision ID: 47b999aa98e3
Revises: 68fcb81878c5
Create Date: 2022-05-23 16:17:16.315523
"""
# revision identifiers, used by Alembic.
revision = '47b999aa98e3'
down_revision = '68fcb81878c5'
def upgrade():
op.drop_table('service_profiles')
op.drop_table('sc_specs')
op.drop_table('sc_instances')
op.drop_table('sc_nodes')
op.drop_table('sc_instance_spec_mappings')
op.drop_table('sc_spec_node_associations')
op.drop_table('ncp_node_instance_network_function_mappings')
op.drop_table('ncp_node_instance_stacks')
op.drop_table('gpm_ptgs_servicechain_mapping')
op.drop_table('ncp_node_to_driver_mapping')
op.drop_table('ncp_service_targets')
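For illustration, a defensive variant of the drops above that tolerates tables already removed out of band; this is a hedged sketch, not part of the migration itself, and it assumes the standard Alembic op.get_bind() call plus SQLAlchemy 1.4-style inspection.
import sqlalchemy as sa
from alembic import op


def _drop_if_exists(table_name):
    # Skip tables that no longer exist so the drop stays idempotent.
    if table_name in sa.inspect(op.get_bind()).get_table_names():
        op.drop_table(table_name)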

View File

@@ -1 +1 @@
68fcb81878c5
47b999aa98e3

View File

@@ -1,636 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
from neutron_lib.db import model_base
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import constants as pconst
from neutron_lib.plugins import directory
from oslo_log import helpers as log
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm
from sqlalchemy.orm import exc
from gbpservice._i18n import _
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.extensions import servicechain as schain
from gbpservice.neutron.services.servicechain.common import exceptions as s_exc
LOG = logging.getLogger(__name__)
MAX_IPV4_SUBNET_PREFIX_LENGTH = 31
MAX_IPV6_SUBNET_PREFIX_LENGTH = 127
class BaseSCResource(model_base.HasId, model_base.HasProject):
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
status = sa.Column(sa.String(length=16), nullable=True)
status_details = sa.Column(sa.String(length=4096), nullable=True)
class BaseSharedSCResource(BaseSCResource):
shared = sa.Column(sa.Boolean)
class SpecNodeAssociation(model_base.BASEV2):
"""Models one to many providing relation between Specs and Nodes."""
__tablename__ = 'sc_spec_node_associations'
servicechain_spec_id = sa.Column(
sa.String(36), sa.ForeignKey('sc_specs.id'), primary_key=True)
node_id = sa.Column(sa.String(36),
sa.ForeignKey('sc_nodes.id'),
primary_key=True)
position = sa.Column(sa.Integer)
class InstanceSpecAssociation(model_base.BASEV2):
"""Models one to many providing relation between Instance and Specs."""
__tablename__ = 'sc_instance_spec_mappings'
servicechain_instance_id = sa.Column(
sa.String(36), sa.ForeignKey('sc_instances.id'), primary_key=True)
servicechain_spec_id = sa.Column(sa.String(36),
sa.ForeignKey('sc_specs.id'),
primary_key=True)
position = sa.Column(sa.Integer)
class ServiceChainNode(model_base.BASEV2, BaseSharedSCResource):
"""ServiceChain Node"""
__tablename__ = 'sc_nodes'
config = sa.Column(sa.TEXT)
specs = orm.relationship(SpecNodeAssociation,
backref="nodes",
cascade='all, delete, delete-orphan')
service_type = sa.Column(sa.String(50), nullable=True)
service_profile_id = sa.Column(
sa.String(36), sa.ForeignKey('service_profiles.id'),
nullable=True)
class ServiceChainInstance(model_base.BASEV2, BaseSCResource):
"""Service chain instances"""
__tablename__ = 'sc_instances'
config_param_values = sa.Column(sa.String(4096))
specs = orm.relationship(
InstanceSpecAssociation,
backref='instances',
cascade='all,delete, delete-orphan',
order_by='InstanceSpecAssociation.position',
collection_class=ordering_list('position', count_from=1))
provider_ptg_id = sa.Column(sa.String(36),
# FixMe(Magesh) Issue with cascade on Delete
# sa.ForeignKey('gp_policy_target_groups.id'),
nullable=True)
consumer_ptg_id = sa.Column(sa.String(36),
# sa.ForeignKey('gp_policy_target_groups.id'),
nullable=True)
management_ptg_id = sa.Column(sa.String(36),
# sa.ForeignKey('gp_policy_target_groups.id'),
nullable=True)
classifier_id = sa.Column(sa.String(36),
# sa.ForeignKey('gp_policy_classifiers.id'),
nullable=True)
class ServiceChainSpec(model_base.BASEV2, BaseSharedSCResource):
""" ServiceChain Spec
"""
__tablename__ = 'sc_specs'
nodes = orm.relationship(
SpecNodeAssociation,
backref='specs', cascade='all, delete, delete-orphan',
order_by='SpecNodeAssociation.position',
collection_class=ordering_list('position', count_from=1))
config_param_names = sa.Column(sa.String(4096))
instances = orm.relationship(InstanceSpecAssociation,
backref="specs",
cascade='all, delete, delete-orphan')
class ServiceProfile(model_base.BASEV2, BaseSharedSCResource):
""" Service Profile
"""
__tablename__ = 'service_profiles'
vendor = sa.Column(sa.String(50))
# Not using ENUM for less painful upgrades. Validation will happen at the
# API level
insertion_mode = sa.Column(sa.String(50))
service_type = sa.Column(sa.String(50))
service_flavor = sa.Column(sa.String(1024))
nodes = orm.relationship(ServiceChainNode, backref="service_profile")
class ServiceChainDbPlugin(schain.ServiceChainPluginBase):
"""ServiceChain plugin interface implementation using SQLAlchemy models."""
# TODO(osms69): native bulk support
__native_bulk_support = False
__native_pagination_support = True
__native_sorting_support = True
def __init__(self, *args, **kwargs):
super(ServiceChainDbPlugin, self).__init__(*args, **kwargs)
@property
def _grouppolicy_plugin(self):
# REVISIT(Magesh): Need initialization method after all
# plugins are loaded to grab and store plugin.
grouppolicy_plugin = directory.get_plugin(pconst.GROUP_POLICY)
if not grouppolicy_plugin:
LOG.error("No Grouppolicy service plugin found.")
raise s_exc.ServiceChainDeploymentError()
return grouppolicy_plugin
# REVISIT: This is temporary, the correct fix is to use the
# project_id in the context. Moreover, patch.py already patches
# this, so it should not be required here.
def _get_tenant_id_for_create(self, context, resource):
if context.is_admin and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
return tenant_id
elif ('tenant_id' in resource and
resource['tenant_id'] != context.tenant_id):
reason = _('Cannot create resource for another tenant')
raise n_exc.AdminRequired(reason=reason)
else:
tenant_id = context.tenant_id
return tenant_id
def _get_servicechain_node(self, context, node_id):
try:
return db_api.get_by_id(context, ServiceChainNode, node_id)
except exc.NoResultFound:
raise schain.ServiceChainNodeNotFound(sc_node_id=node_id)
def _get_servicechain_spec(self, context, spec_id):
try:
return db_api.get_by_id(context, ServiceChainSpec, spec_id)
except exc.NoResultFound:
raise schain.ServiceChainSpecNotFound(sc_spec_id=spec_id)
def _get_servicechain_instance(self, context, instance_id):
try:
return db_api.get_by_id(context, ServiceChainInstance, instance_id)
except exc.NoResultFound:
raise schain.ServiceChainInstanceNotFound(
sc_instance_id=instance_id)
def _get_service_profile(self, context, profile_id):
try:
return db_api.get_by_id(context, ServiceProfile, profile_id)
except exc.NoResultFound:
raise schain.ServiceProfileNotFound(
profile_id=profile_id)
def _populate_common_fields_in_dict(self, db_ref):
res = {'id': db_ref['id'],
'tenant_id': db_ref['tenant_id'],
'name': db_ref['name'],
'description': db_ref['description'],
'status': db_ref['status'],
'status_details': db_ref['status_details'],
'shared': db_ref.get('shared', False)}
return res
def _make_sc_node_dict(self, sc_node, fields=None):
res = self._populate_common_fields_in_dict(sc_node)
res['service_profile_id'] = sc_node['service_profile_id']
res['service_type'] = sc_node['service_type']
res['config'] = sc_node['config']
res['servicechain_specs'] = [sc_spec['servicechain_spec_id']
for sc_spec in sc_node['specs']]
return db_api.resource_fields(res, fields)
def _make_sc_spec_dict(self, spec, fields=None):
res = self._populate_common_fields_in_dict(spec)
res['config_param_names'] = spec.get('config_param_names')
res['nodes'] = [sc_node['node_id'] for sc_node in spec['nodes']]
res['instances'] = [x['servicechain_instance_id'] for x in
spec['instances']]
return db_api.resource_fields(res, fields)
def _make_sc_instance_dict(self, instance, fields=None):
res = {'id': instance['id'],
'tenant_id': instance['tenant_id'],
'name': instance['name'],
'description': instance['description'],
'config_param_values': instance['config_param_values'],
'provider_ptg_id': instance['provider_ptg_id'],
'consumer_ptg_id': instance['consumer_ptg_id'],
'management_ptg_id': instance['management_ptg_id'],
'classifier_id': instance['classifier_id'],
'status': instance['status'],
'status_details': instance['status_details']}
res['servicechain_specs'] = [sc_spec['servicechain_spec_id']
for sc_spec in instance['specs']]
return db_api.resource_fields(res, fields)
def _make_service_profile_dict(self, profile, fields=None):
res = self._populate_common_fields_in_dict(profile)
res['service_type'] = profile['service_type']
res['service_flavor'] = profile['service_flavor']
res['vendor'] = profile['vendor']
res['insertion_mode'] = profile['insertion_mode']
res['nodes'] = [node['id'] for node in profile['nodes']]
return db_api.resource_fields(res, fields)
@staticmethod
def validate_service_type(service_type):
if service_type not in schain.sc_supported_type:
raise schain.ServiceTypeNotSupported(sc_service_type=service_type)
@log.log_method_call
def create_servicechain_node(self, context, servicechain_node):
node = servicechain_node['servicechain_node']
tenant_id = self._get_tenant_id_for_create(context, node)
with db_api.CONTEXT_WRITER.using(context):
node_db = ServiceChainNode(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=node['name'], description=node['description'],
service_profile_id=node.get('service_profile_id'),
service_type=node.get('service_type'),
config=node['config'], shared=node['shared'],
status=node.get('status'),
status_details=node.get('status_details'))
context.session.add(node_db)
return self._make_sc_node_dict(node_db)
@log.log_method_call
def update_servicechain_node(self, context, servicechain_node_id,
servicechain_node, set_params=False):
node = servicechain_node['servicechain_node']
with db_api.CONTEXT_WRITER.using(context):
node_db = self._get_servicechain_node(context,
servicechain_node_id)
node_db.update(node)
# Update the config param names derived for the associated specs
spec_node_associations = node_db.specs
for node_spec in spec_node_associations:
spec_id = node_spec.servicechain_spec_id
spec_db = self._get_servicechain_spec(context, spec_id)
self._process_nodes_for_spec(
context, spec_db, self._make_sc_spec_dict(spec_db),
set_params=set_params)
return self._make_sc_node_dict(node_db)
@log.log_method_call
def delete_servicechain_node(self, context, servicechain_node_id):
with db_api.CONTEXT_WRITER.using(context):
node_db = self._get_servicechain_node(context,
servicechain_node_id)
if node_db.specs:
raise schain.ServiceChainNodeInUse(
node_id=servicechain_node_id)
context.session.delete(node_db)
@log.log_method_call
def get_servicechain_node(self, context, servicechain_node_id,
fields=None):
node = self._get_servicechain_node(context, servicechain_node_id)
return self._make_sc_node_dict(node, fields)
@log.log_method_call
def get_servicechain_nodes(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
plugin = directory.get_plugin()
marker_obj = db_api.get_marker_obj(plugin, context,
'servicechain_node', limit,
marker)
return db_api.get_collection(context, ServiceChainNode,
self._make_sc_node_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@log.log_method_call
def get_servicechain_nodes_count(self, context, filters=None):
return db_api.get_collection_count(context, ServiceChainNode,
filters=filters)
def _process_nodes_for_spec(self, context, spec_db, spec,
set_params=True):
if 'nodes' in spec:
self._set_nodes_for_spec(context, spec_db, spec['nodes'],
set_params=set_params)
del spec['nodes']
return spec
def _set_nodes_for_spec(self, context, spec_db, nodes_id_list,
set_params=True):
if not nodes_id_list:
spec_db.nodes = []
spec_db.config_param_names = '[]'
return
with context.session.begin(subtransactions=True):
# We will first check if the new list of nodes is valid
filters = {'id': [n_id for n_id in nodes_id_list]}
nodes_in_db = db_api.get_collection_query(context,
ServiceChainNode,
filters=filters)
nodes_list = [n_db['id'] for n_db in nodes_in_db]
for node_id in nodes_id_list:
if node_id not in nodes_list:
# If we find an invalid node id in the list we
# do not perform the update
raise schain.ServiceChainNodeNotFound(sc_node_id=node_id)
# New list of nodes is valid so we will first reset the
# existing list and then add each node in order.
# Note that the list could be empty in which case we interpret
# it as clearing existing nodes.
spec_db.nodes = []
if set_params:
spec_db.config_param_names = '[]'
for node_id in nodes_id_list:
if set_params:
sc_node = self.get_servicechain_node(context, node_id)
node_dict = jsonutils.loads(sc_node['config'])
config_params = (node_dict.get('parameters') or
node_dict.get('Parameters'))
if config_params:
if not spec_db.config_param_names:
spec_db.config_param_names = str(
list(config_params.keys()))
else:
config_param_names = ast.literal_eval(
spec_db.config_param_names)
config_param_names.extend(
list(config_params.keys()))
spec_db.config_param_names = str(
config_param_names)
assoc = SpecNodeAssociation(servicechain_spec_id=spec_db.id,
node_id=node_id)
spec_db.nodes.append(assoc)
def _process_specs_for_instance(self, context, instance_db, instance):
if 'servicechain_specs' in instance:
self._set_specs_for_instance(context, instance_db,
instance['servicechain_specs'])
del instance['servicechain_specs']
return instance
def _set_specs_for_instance(self, context, instance_db, spec_id_list):
if not spec_id_list:
instance_db.spec_ids = []
return
with context.session.begin(subtransactions=True):
filters = {'id': spec_id_list}
specs_in_db = db_api.get_collection_query(context,
ServiceChainSpec,
filters=filters)
specs_list = set(spec_db['id'] for spec_db in specs_in_db)
for spec_id in spec_id_list:
if spec_id not in specs_list:
# Do not update if spec ID is invalid
raise schain.ServiceChainSpecNotFound(sc_spec_id=spec_id)
# Reset the existing list and then add each spec in order. The list
# could be empty in which case we clear the existing specs.
instance_db.specs = []
for spec_id in spec_id_list:
assoc = InstanceSpecAssociation(
servicechain_instance_id=instance_db.id,
servicechain_spec_id=spec_id)
instance_db.specs.append(assoc)
def _get_instances_from_policy_target(self, context, policy_target):
with context.session.begin(subtransactions=True):
ptg_id = policy_target['policy_target_group_id']
scis_p = self.get_servicechain_instances(
context, {'provider_ptg_id': [ptg_id]})
scis_c = self.get_servicechain_instances(
context, {'consumer_ptg_id': [ptg_id]})
# Don't return duplicates
result = []
seen = set()
for sci in scis_p + scis_c:
if sci['id'] not in seen:
seen.add(sci['id'])
result.append(sci)
return result
@log.log_method_call
def create_servicechain_spec(self, context, servicechain_spec,
set_params=True):
spec = servicechain_spec['servicechain_spec']
tenant_id = self._get_tenant_id_for_create(context, spec)
with db_api.CONTEXT_WRITER.using(context):
spec_db = ServiceChainSpec(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=spec['name'],
description=spec['description'],
shared=spec['shared'],
status=spec.get('status'),
status_details=spec.get('status_details'))
self._process_nodes_for_spec(context, spec_db, spec,
set_params=set_params)
context.session.add(spec_db)
return self._make_sc_spec_dict(spec_db)
@log.log_method_call
def update_servicechain_spec(self, context, spec_id,
servicechain_spec, set_params=True):
spec = servicechain_spec['servicechain_spec']
with db_api.CONTEXT_WRITER.using(context):
spec_db = self._get_servicechain_spec(context,
spec_id)
spec = self._process_nodes_for_spec(context, spec_db, spec,
set_params=set_params)
spec_db.update(spec)
return self._make_sc_spec_dict(spec_db)
@log.log_method_call
def delete_servicechain_spec(self, context, spec_id):
policy_actions = self._grouppolicy_plugin.get_policy_actions(
context, filters={"action_value": [spec_id]})
if policy_actions:
raise schain.ServiceChainSpecInUse(spec_id=spec_id)
with db_api.CONTEXT_WRITER.using(context):
spec_db = self._get_servicechain_spec(context,
spec_id)
if spec_db.instances:
raise schain.ServiceChainSpecInUse(spec_id=spec_id)
context.session.delete(spec_db)
@log.log_method_call
def get_servicechain_spec(self, context, spec_id,
fields=None):
spec = self._get_servicechain_spec(context, spec_id)
return self._make_sc_spec_dict(spec, fields)
@log.log_method_call
def get_servicechain_specs(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
plugin = directory.get_plugin()
marker_obj = db_api.get_marker_obj(plugin, context,
'servicechain_spec', limit,
marker)
return db_api.get_collection(context, ServiceChainSpec,
self._make_sc_spec_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@log.log_method_call
def get_servicechain_specs_count(self, context, filters=None):
return db_api.get_collection_count(context, ServiceChainSpec,
filters=filters)
@log.log_method_call
def create_servicechain_instance(self, context, servicechain_instance):
instance = servicechain_instance['servicechain_instance']
tenant_id = self._get_tenant_id_for_create(context, instance)
with db_api.CONTEXT_WRITER.using(context):
if not instance.get('management_ptg_id'):
management_groups = (
self._grouppolicy_plugin.get_policy_target_groups(
context, {'service_management': [True],
'tenant_id': [instance.get('tenant_id')]}))
if not management_groups:
# Fall back on shared service management
management_groups = (
self._grouppolicy_plugin.get_policy_target_groups(
context, {'service_management': [True]}))
if management_groups:
instance['management_ptg_id'] = management_groups[0]['id']
instance_db = ServiceChainInstance(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id, name=instance['name'],
description=instance['description'],
config_param_values=instance['config_param_values'],
provider_ptg_id=instance.get('provider_ptg_id'),
consumer_ptg_id=instance.get('consumer_ptg_id'),
management_ptg_id=instance.get('management_ptg_id'),
classifier_id=instance.get('classifier_id'),
status=instance.get('status'),
status_details=instance.get('status_details'))
self._process_specs_for_instance(context, instance_db, instance)
context.session.add(instance_db)
return self._make_sc_instance_dict(instance_db)
@log.log_method_call
def update_servicechain_instance(self, context, servicechain_instance_id,
servicechain_instance):
instance = servicechain_instance['servicechain_instance']
with db_api.CONTEXT_WRITER.using(context):
instance_db = self._get_servicechain_instance(
context, servicechain_instance_id)
instance = self._process_specs_for_instance(context, instance_db,
instance)
instance_db.update(instance)
return self._make_sc_instance_dict(instance_db)
@log.log_method_call
def delete_servicechain_instance(self, context, servicechain_instance_id):
with db_api.CONTEXT_WRITER.using(context):
instance_db = self._get_servicechain_instance(
context, servicechain_instance_id)
context.session.delete(instance_db)
@log.log_method_call
def get_servicechain_instance(self, context, sc_instance_id, fields=None):
instance_db = self._get_servicechain_instance(context, sc_instance_id)
return self._make_sc_instance_dict(instance_db, fields)
@log.log_method_call
def get_servicechain_instances(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
plugin = directory.get_plugin()
marker_obj = db_api.get_marker_obj(plugin, context,
'servicechain_instance',
limit, marker)
return db_api.get_collection(context, ServiceChainInstance,
self._make_sc_instance_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@log.log_method_call
def get_servicechain_instances_count(self, context, filters=None):
return db_api.get_collection_count(context, ServiceChainInstance,
filters=filters)
@log.log_method_call
def get_service_profiles_count(self, context, filters=None):
return db_api.get_collection_count(context, ServiceProfile,
filters=filters)
@log.log_method_call
def create_service_profile(self, context, service_profile):
profile = service_profile['service_profile']
tenant_id = self._get_tenant_id_for_create(context, profile)
with db_api.CONTEXT_WRITER.using(context):
profile_db = ServiceProfile(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=profile['name'], description=profile['description'],
service_type=profile.get('service_type'),
insertion_mode=profile.get('insertion_mode'),
vendor=profile.get('vendor'),
service_flavor=profile.get('service_flavor'),
shared=profile.get('shared'),
status=profile.get('status'),
status_details=profile.get('status_details'))
context.session.add(profile_db)
return self._make_service_profile_dict(profile_db)
@log.log_method_call
def update_service_profile(self, context, service_profile_id,
service_profile):
profile = service_profile['service_profile']
with db_api.CONTEXT_WRITER.using(context):
profile_db = self._get_service_profile(context,
service_profile_id)
profile_db.update(profile)
return self._make_service_profile_dict(profile_db)
@log.log_method_call
def delete_service_profile(self, context, service_profile_id):
with db_api.CONTEXT_WRITER.using(context):
profile_db = self._get_service_profile(context,
service_profile_id)
if profile_db.nodes:
raise schain.ServiceProfileInUse(
profile_id=service_profile_id)
context.session.delete(profile_db)
@log.log_method_call
def get_service_profile(self, context, service_profile_id, fields=None):
profile_db = self._get_service_profile(
context, service_profile_id)
return self._make_service_profile_dict(profile_db, fields)
@log.log_method_call
def get_service_profiles(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
plugin = directory.get_plugin()
marker_obj = db_api.get_marker_obj(plugin, context,
'service_profile',
limit, marker)
return db_api.get_collection(context, ServiceProfile,
self._make_service_profile_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)

View File

@@ -10,10 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
from neutron.api import extensions
from neutron.api.v2 import resource as neutron_resource
from neutron.db.db_base_plugin_v2 import _constants
from neutron.db import l3_db
from neutron.db import models_v2
@@ -30,7 +27,6 @@ from oslo_log import log
from oslo_utils import excutils
from gbpservice._i18n import _
from gbpservice.common import utils as gbp_utils
from gbpservice.neutron.db import api as db_api
@@ -41,22 +37,7 @@ if not hasattr(quota_resource, 'GBP_PATCHED'):
orig_count_resource = quota_resource._count_resource
def new_count_resource(*kwargs):
request = gbp_utils.get_obj_from_stack(neutron_resource.Request)
orig_plugins = directory._get_plugin_directory()._plugins
if request and request.environ['PATH_INFO'] == (
'/servicechain/service_profiles.json'):
new_plugins = copy.copy(directory._get_plugin_directory()._plugins)
# The service_profile resource is supported by the FLAVORS
# plugin as well as the SERVICECHAIN plugin. At this point
# we know that we are dealing with the service_profile from
# SERVICECHAIN, and since the original implementation of the
# count_resource will think of service_profile from FLAVORS
# (in the sorted order of plugins, FLAVORS precedes SERVICECHAIN)
# we temporarily remove the FLAVORS plugin reference from the
# plugins directory.
new_plugins.pop('FLAVORS')
directory._get_plugin_directory()._plugins = new_plugins
count_resource = orig_count_resource(*kwargs)
directory._get_plugin_directory()._plugins = orig_plugins
return count_resource
@@ -160,22 +141,11 @@ def extend_resources(self, version, attr_map):
for res, resource_attrs in list(extended_attrs.items()):
res_to_update = attr_map.setdefault(res, {})
if self._is_sub_resource(res_to_update):
# kentwu: service_profiles defined in servicechain
# plugin has a name conflict with service_profiles
# sub-resource defined in flavor plugin. The attr_map
# can only have one service_profiles so here we make
# this very same service_profiles to have the
# attributes from both plugins. This behavior is now
# consistent with Pike.
if (ext_name == 'servicechain' and
res == 'service_profiles'):
res_to_update.update(resource_attrs)
# in the case of an existing sub-resource, we need to
# update the parameters content rather than overwrite
# it, and also keep the description of the parent
# resource unmodified
else:
res_to_update['parameters'].update(
res_to_update['parameters'].update(
resource_attrs['parameters'])
else:
res_to_update.update(resource_attrs)

View File

@@ -1,466 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import resource_helper
from neutron_lib.api import converters as conv
from neutron_lib.api import extensions
from neutron_lib.api import validators as valid
from neutron_lib import constants as nlib_const
from neutron_lib import exceptions as nexc
from neutron_lib.plugins import constants
from neutron_lib.services import base as service_base
from oslo_config import cfg
from oslo_log import log as logging
import six
from gbpservice._i18n import _
from gbpservice.neutron import extensions as gbp_extensions
import gbpservice.neutron.extensions.group_policy # noqa
from gbpservice.neutron.services.servicechain.common import constants as scc
# The code below is a monkey patch of key Neutron modules. This is needed for
# the GBP service to be loaded correctly. GBP extensions' path is added
# to Neutron's so that it's found at extension scanning time.
neutron_extensions.append_api_extensions_path(gbp_extensions.__path__)
LOG = logging.getLogger(__name__)
# Service Chain Exceptions
class ServiceProfileNotFound(nexc.NotFound):
message = _("ServiceProfile %(profile_id)s could not be found")
class ServiceProfileInUse(nexc.NotFound):
message = _("Unable to complete operation, ServiceProfile "
"%(profile_id)s is in use")
class ServiceChainNodeNotFound(nexc.NotFound):
message = _("ServiceChainNode %(sc_node_id)s could not be found")
class ServiceChainSpecNotFound(nexc.NotFound):
message = _("ServiceChainSpec %(sc_spec_id)s could not be found")
class ServiceChainInstanceNotFound(nexc.NotFound):
message = _("ServiceChainInstance %(sc_instance_id)s could not be found")
class ServiceChainNodeInUse(nexc.InUse):
message = _("Unable to complete operation, ServiceChainNode "
"%(node_id)s is in use")
class ServiceChainSpecInUse(nexc.InUse):
message = _("Unable to complete operation, ServiceChainSpec "
"%(spec_id)s is in use")
class ServiceTypeNotFound(nexc.NotFound):
message = _("ServiceType %(service_type_id) could not be found")
class ServiceTypeNotSupported(nexc.NotFound):
message = _("ServiceType %(service_type_id) not supported")
class PortNotFound(nexc.NotFound):
message = _("Port %(port_id)s could not be found")
def _validate_str_list(data, valid_values=None):
if not isinstance(data, list):
msg = ("'%s' is not a list") % data
LOG.debug(msg)
return msg
for item in data:
msg = valid.validate_string(item)
if msg:
LOG.debug(msg)
return msg
if len(set(data)) != len(data):
msg = ("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
valid.validators['type:string_list'] = _validate_str_list
SERVICECHAIN_NODES = 'servicechain_nodes'
SERVICECHAIN_SPECS = 'servicechain_specs'
SERVICECHAIN_INSTANCES = 'servicechain_instances'
SERVICE_PROFILES = 'service_profiles'
RESOURCE_ATTRIBUTE_MAP = {
SERVICECHAIN_NODES: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None}, 'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:gbp_resource_name': None}, 'default': '',
'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True, 'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_details': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'service_type': {'allow_post': True, 'allow_put': False,
'validate': {'type:string_or_none': None},
'is_visible': True, 'default': None},
'service_profile_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'config': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'required': True, 'is_visible': True},
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False,
'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
},
SERVICECHAIN_SPECS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None}, 'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:gbp_resource_name': None},
'default': '', 'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True, 'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_details': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'nodes': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'convert_to': conv.convert_none_to_empty_list,
'default': None, 'is_visible': True,
'required': True},
'config_param_names': {'allow_post': False, 'allow_put': False,
'validate': {'type:string_list': None},
'default': [], 'is_visible': True},
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False,
'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
},
SERVICECHAIN_INSTANCES: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None}, 'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:gbp_resource_name': None},
'default': '', 'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True, 'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_details': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'servicechain_specs': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'convert_to': conv.convert_none_to_empty_list,
'default': None, 'is_visible': True,
'required': True},
'provider_ptg_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None,
'required': True},
'consumer_ptg_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string_or_none': None},
'is_visible': True, 'default': None,
'required': True},
'management_ptg_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None,
'required': True},
'classifier_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None,
'required': True},
'config_param_values': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'default': "", 'is_visible': True},
},
SERVICE_PROFILES: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None}, 'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:gbp_resource_name': None},
'default': '', 'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True, 'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_details': {'allow_post': False, 'allow_put': False,
'is_visible': True},
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False,
'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
'vendor': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'insertion_mode': {'allow_post': True, 'allow_put': True,
'validate': {'type:values':
scc.VALID_INSERTION_MODES},
'is_visible': True, 'default': None},
'service_type': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'required': True},
'service_flavor': {'allow_post': True, 'allow_put': True,
'validate': {'type:string_or_none': None},
'is_visible': True, 'default': None},
},
}
service_chain_quota_opts = [
cfg.IntOpt('quota_servicechain_node',
default=-1,
help=_('Number of Service Chain Nodes allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_servicechain_spec',
default=-1,
help=_('Number of Service Chain Specs allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_servicechain_instance',
default=-1,
help=_('Number of Service Chain Instances allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_service_profile',
default=-1,
help=_('Number of Service Profiles allowed per tenant. '
'A negative value means unlimited.')),
]
cfg.CONF.register_opts(service_chain_quota_opts, 'QUOTAS')
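Since register_opts() above places these options in the QUOTAS group, a consumer would read them roughly as in the hypothetical sketch below; the snippet is illustrative only and assumes oslo.config has been initialized and this module imported.
from oslo_config import cfg

# Hypothetical read of one registered quota; a negative value means unlimited.
node_quota = cfg.CONF.QUOTAS.quota_servicechain_node
unlimited = node_quota < 0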
class Servicechain(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Service Chain Abstraction"
@classmethod
def get_alias(cls):
return "servicechain"
@classmethod
def get_description(cls):
return "Extension for Service Chain Abstraction"
@classmethod
def get_namespace(cls):
return "https://wiki.openstack.org/wiki/Neutron/sc/v2.0/"
@classmethod
def get_updated(cls):
return "2014-08-03T12:00:00-00:00"
@classmethod
def get_resources(cls):
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
gbp_extensions.register_plurals(plural_mappings)
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.SERVICECHAIN,
register_quota=True)
@classmethod
def get_plugin_interface(cls):
return ServiceChainPluginBase
def update_attributes_map(self, attributes):
# REVISIT: temporary solution until the services
# are removed fully.
if 'service_profiles' in attributes:
attributes['service_profiles'].pop('parent')
attributes['service_profiles'].pop('parameters')
super(Servicechain, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class ServiceChainPluginBase(service_base.ServicePluginBase):
def get_plugin_name(self):
return constants.SERVICECHAIN
def get_plugin_type(self):
return constants.SERVICECHAIN
def get_plugin_description(self):
return 'Service Chain plugin'
def update_chains_pt_added(self, context, policy_target, instance_id):
""" Auto scaling function.
Override this method to react to policy target creation.
"""
pass
def update_chains_pt_removed(self, context, policy_target, instance_id):
""" Auto scaling function.
Override this method to react to policy target deletion.
"""
pass
def update_chains_consumer_added(self, context, policy_target_group,
instance_id):
""" Auto scaling function.
Override this method to react to policy target group addition as
a consumer of a chain.
"""
pass
def update_chains_consumer_removed(self, context, policy_target_group,
instance_id):
""" Auto scaling function.
Override this method to react to policy target group removed as a
consumer of a chain
"""
pass
def policy_target_group_updated(self, context, old_policy_target_group,
current_policy_target_group,
instance_id):
""" Utility function.
Override this method to react to policy target group update
"""
pass
@abc.abstractmethod
def get_servicechain_nodes(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_servicechain_node(self, context, servicechain_node_id,
fields=None):
pass
@abc.abstractmethod
def create_servicechain_node(self, context, servicechain_node):
pass
@abc.abstractmethod
def update_servicechain_node(self, context, servicechain_node_id,
servicechain_node):
pass
@abc.abstractmethod
def delete_servicechain_node(self, context, servicechain_node_id):
pass
@abc.abstractmethod
def get_servicechain_specs(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_servicechain_spec(self, context, servicechain_spec_id,
fields=None):
pass
@abc.abstractmethod
def create_servicechain_spec(self, context, servicechain_spec):
pass
@abc.abstractmethod
def update_servicechain_spec(self, context, servicechain_spec_id,
servicechain_spec):
pass
@abc.abstractmethod
def delete_servicechain_spec(self, context, servicechain_spec_id):
pass
@abc.abstractmethod
def get_servicechain_instances(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_servicechain_instance(self, context, servicechain_instance_id,
fields=None):
pass
@abc.abstractmethod
def create_servicechain_instance(self, context, servicechain_instance):
pass
@abc.abstractmethod
def update_servicechain_instance(self, context, servicechain_instance_id,
servicechain_instance):
pass
@abc.abstractmethod
def delete_servicechain_instance(self, context, servicechain_instance_id):
pass
@abc.abstractmethod
def create_service_profile(self, context, service_profile):
pass
@abc.abstractmethod
def update_service_profile(self, context, service_profile_id,
service_profile):
pass
@abc.abstractmethod
def delete_service_profile(self, context, service_profile_id):
pass
@abc.abstractmethod
def get_service_profile(self, context, service_profile_id, fields=None):
pass
@abc.abstractmethod
def get_service_profiles(self, context, filters=None, fields=None):
pass

View File

@@ -1,40 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.ml2.drivers.openvswitch.mech_driver import (
mech_openvswitch as base)
from neutron_lib.api.definitions import portbindings
from neutron_lib.plugins.ml2 import api
from gbpservice.neutron.services.servicechain.plugins.ncp import plumber_base
class TrafficStitchingMechanismGBPDriver(base.OpenvswitchMechanismDriver):
"""Traffic Stitching Mechanism Driver for GBP.
This driver makes sure that service targets are bound with port_filter and
hybrid_mode set to false. This should disable port security and anti
spoofing rules for these special ports.
"""
def try_to_bind_segment_for_agent(self, context, segment, agent):
vif_details = self.vif_details
if self.check_segment_for_agent(segment, agent):
if context.current['name'].startswith(
'pt_' + plumber_base.SERVICE_TARGET_NAME_PREFIX):
vif_details = {portbindings.CAP_PORT_FILTER: False,
portbindings.OVS_HYBRID_PLUG: False}
context.set_binding(
segment[api.ID], self.vif_type, vif_details)
return True
else:
return False

View File

@@ -39,8 +39,6 @@ from gbpservice.neutron.services.grouppolicy import (
from gbpservice.neutron.services.grouppolicy.common import constants as gp_cts
from gbpservice.neutron.services.grouppolicy.common import exceptions as gp_exc
from gbpservice.neutron.services.grouppolicy.common import utils
from gbpservice.neutron.services.servicechain.plugins.ncp import (
model as ncp_model)
LOG = logging.getLogger(__name__)
@@ -643,8 +641,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
pt_ids = policy_target_group['policy_targets']
for pt in self.get_policy_targets(context.elevated(),
{'id': pt_ids}):
if (pt['port_id'] and self._is_port_bound(pt['port_id']) and
not (self._is_service_target(context, pt['id']))):
if pt['port_id'] and self._is_port_bound(pt['port_id']):
raise gp_exc.PolicyTargetGroupInUse(
policy_target_group=policy_target_group_id)
policy_context = p_context.PolicyTargetGroupContext(
@@ -1790,10 +1787,6 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
return (port.get('binding:vif_type') not in not_bound) and port.get(
'binding:host_id') and (port['device_owner'] or port['device_id'])
def _is_service_target(self, context, pt_id):
return bool(ncp_model.get_service_targets_count(
context.session, pt_id))
def _ensure_tenant(self, context, resource):
# TODO(Sumit): This check is ideally not required, but a bunch of UTs
# are not setup correctly to populate the tenant_id, hence we

View File

@@ -1,42 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ServiceChainNotificationsMixin(object):
def _notify_sc_plugin_pt_added(self, context, policy_target, instance_id):
if self._servicechain_plugin:
self._servicechain_plugin.update_chains_pt_added(
context, policy_target, instance_id)
def _notify_sc_plugin_pt_removed(self, context, policy_target,
instance_id):
if self._servicechain_plugin:
self._servicechain_plugin.update_chains_pt_removed(
context, policy_target, instance_id)
def _notify_sc_consumer_added(self, context, policy_target_group,
instance_id):
if self._servicechain_plugin:
self._servicechain_plugin.update_chains_consumer_added(
context, policy_target_group, instance_id)
def _notify_sc_consumer_removed(self, context, policy_target_group,
instance_id):
if self._servicechain_plugin:
self._servicechain_plugin.update_chains_consumer_removed(
context, policy_target_group, instance_id)
def _notify_ptg_updated(self, context, old, current, instance_id):
if self._servicechain_plugin:
self._servicechain_plugin.policy_target_group_updated(
context, old, current, instance_id)

View File

@@ -1,23 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
INSERTION_MODE_L2 = 'l2'
INSERTION_MODE_L3 = 'l3'
INSERTION_MODE_BITW = 'bitw'
INSERTION_MODE_TAP = 'tap'
INSERTION_MODE_NONE = None
VALID_INSERTION_MODES = [INSERTION_MODE_L2,
INSERTION_MODE_L3,
INSERTION_MODE_BITW,
INSERTION_MODE_TAP,
INSERTION_MODE_NONE]

View File

@@ -1,46 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions used by ServiceChain plugin and drivers."""
from neutron_lib import exceptions
from gbpservice._i18n import _
class ServiceChainDriverError(exceptions.NeutronException):
"""ServiceChain driver call failed."""
message = _("%(method)s failed.")
class ServiceChainException(exceptions.NeutronException):
"""Base for servicechain driver exceptions returned to user."""
pass
class ServiceChainBadRequest(exceptions.BadRequest, ServiceChainException):
"""Base for servicechain driver bad request exceptions returned to user."""
pass
class ServiceChainDeploymentError(ServiceChainException):
message = _("Deployment not configured properly. See logs for details.")
class InvalidServiceTypeForReferenceDriver(ServiceChainBadRequest):
message = _("The reference service chain driver only supports the services"
" Loadbalancer and Firewall services in a Service Chain Spec")
class NodeUpdateNotSupported(ServiceChainBadRequest):
message = _("The configured service chain driver does not support Service "
"Chain Node config update")

View File

@@ -1,34 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from gbpservice._i18n import _
service_chain_opts = [
cfg.ListOpt('node_drivers',
default=['node_dummy'],
help=_("An ordered list of service chain node drivers "
"entrypoints to be loaded from the "
"gbpservice.neutron.servicechain.ncp_drivers "
"namespace.")),
cfg.StrOpt('node_plumber',
default='dummy_plumber',
help=_("The plumber used by the Node Composition Plugin "
"for service plumbing. Entrypoint loaded from the "
"gbpservice.neutron.servicechain.ncp_plumbers "
"namespace."))
]
cfg.CONF.register_opts(service_chain_opts, "node_composition_plugin")

View File

@@ -1,237 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from gbpservice.common import utils
from gbpservice.neutron.services.grouppolicy.drivers import resource_mapping
from gbpservice.neutron.services.servicechain.plugins.ncp import model
def get_gbp_plugin():
return directory.get_plugin("GROUP_POLICY")
def get_node_driver_context(sc_plugin, context, sc_instance,
current_node, original_node=None,
management_group=None, service_targets=None):
admin_context = utils.admin_context(context)
specs = sc_plugin.get_servicechain_specs(
admin_context, filters={'id': sc_instance['servicechain_specs']})
position = _calculate_node_position(specs, current_node['id'])
provider, _ = _get_ptg_or_ep(
admin_context, sc_instance['provider_ptg_id'])
consumer, is_consumer_external = _get_ptg_or_ep(
admin_context, sc_instance['consumer_ptg_id'])
management, _ = _get_ptg_or_ep(admin_context,
sc_instance['management_ptg_id'])
classifier = get_gbp_plugin().get_policy_classifier(
admin_context, sc_instance['classifier_id'])
current_profile = sc_plugin.get_service_profile(
admin_context, current_node['service_profile_id'])
original_profile = sc_plugin.get_service_profile(
admin_context,
original_node['service_profile_id']) if original_node else None
if not service_targets:
service_targets = model.get_service_targets(
admin_context.session, servicechain_instance_id=sc_instance['id'],
position=position, servicechain_node_id=current_node['id'])
return NodeDriverContext(sc_plugin=sc_plugin,
context=context,
service_chain_instance=sc_instance,
service_chain_specs=specs,
current_service_chain_node=current_node,
current_service_profile=current_profile,
provider_group=provider,
consumer_group=consumer,
management_group=management,
original_service_chain_node=original_node,
original_service_profile=original_profile,
service_targets=service_targets,
position=position,
classifier=classifier,
is_consumer_external=is_consumer_external)
def _get_ptg_or_ep(context, group_id):
if group_id == resource_mapping.SCI_CONSUMER_NOT_AVAILABLE:
return None, False
group = None
is_group_external = False
# skipping policy target group status call to avoid loop while
# getting servicechain instance status
fields = ['consumed_policy_rule_sets', 'description',
'enforce_service_chains', 'id', 'l2_policy_id', 'name',
'network_service_policy_id', 'policy_targets',
'provided_policy_rule_sets', 'proxied_group_id',
'proxy_group_id', 'proxy_type', 'service_management', 'shared',
'subnets', 'tenant_id']
if group_id:
groups = get_gbp_plugin().get_policy_target_groups(
context, filters={'id': [group_id]}, fields=fields)
if not groups:
groups = get_gbp_plugin().get_external_policies(
context, filters={'id': [group_id]})
if groups:
is_group_external = True
if groups:
group = groups[0]
return (group, is_group_external)
def _calculate_node_position(specs, node_id):
for spec in specs:
pos = 0
for node in spec['nodes']:
pos += 1
if node_id == node:
return pos
class NodeDriverContext(object):
"""Context passed down to NCP Node Drivers."""
def __init__(self, sc_plugin, context, service_chain_instance,
service_chain_specs, current_service_chain_node, position,
current_service_profile, provider_group, consumer_group=None,
management_group=None, original_service_chain_node=None,
original_service_profile=None, service_targets=None,
classifier=None, is_consumer_external=False):
self._gbp_plugin = get_gbp_plugin()
self._sc_plugin = sc_plugin
self._plugin_context = context
self._admin_context = None
self._service_chain_instance = service_chain_instance
self._current_service_chain_node = current_service_chain_node
self._current_service_profile = current_service_profile
self._original_service_chain_node = original_service_chain_node
self._original_service_profile = original_service_profile
self._service_targets = service_targets
self._service_chain_specs = service_chain_specs
self._provider_group = provider_group
self._consumer_group = consumer_group
self._management_group = management_group
self._classifier = classifier
self._is_consumer_external = is_consumer_external
self._relevant_specs = None
self._core_plugin = directory.get_plugin()
self._l3_plugin = directory.get_plugin(constants.L3)
self._position = position
@property
def gbp_plugin(self):
return self._gbp_plugin
@property
def sc_plugin(self):
return self._sc_plugin
@property
def core_plugin(self):
return self._core_plugin
@property
def l3_plugin(self):
return self._l3_plugin
@property
def plugin_context(self):
return self._plugin_context
@property
def plugin_session(self):
return self._plugin_context.session
@property
def session(self):
return self.plugin_session
@property
def admin_context(self):
if not self._admin_context:
self._admin_context = utils.admin_context(self.plugin_context)
return self._admin_context
@property
def admin_session(self):
return self.admin_context.session
@property
def instance(self):
return self._service_chain_instance
@property
def current_node(self):
return self._current_service_chain_node
@property
def current_profile(self):
return self._current_service_profile
@property
def current_position(self):
return self._position
@property
def original_node(self):
return self._original_service_chain_node
@property
def original_profile(self):
return self._original_service_profile
@property
def is_consumer_external(self):
return self._is_consumer_external
@property
def relevant_specs(self):
"""Get specs on the SCI containing this particular Node."""
if not self._relevant_specs:
self._relevant_specs = [x for x in self._service_chain_specs if
self.current_node['id'] in x['nodes']]
return self._relevant_specs
@property
def provider(self):
return self._provider_group
@property
def consumer(self):
return self._consumer_group
@property
def management(self):
return self._management_group
@property
def classifier(self):
return self._classifier
def get_service_targets(self, update=False):
""" Returns the service targets assigned for this service if any.
The result looks like the following:
{
"provider": [pt_uuids],
"consumer": [pt_uuids],
"management": [pt_uuids],
}
"""
if update:
self._service_targets = model.get_service_targets(
self.session, servicechain_instance_id=self.instance['id'],
position=self.current_position,
servicechain_node_id=self.current_node['id'])
return self._service_targets
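
For reference, a minimal sketch (not part of the removed file) of how a node driver could consume this context; the helper name and returned keys are hypothetical, but every attribute read corresponds to a property defined above.

# Hypothetical helper summarizing the node handled by a driver, using only
# NodeDriverContext properties defined above.
def describe_node(context):
    return {
        'instance_id': context.instance['id'],
        'node_id': context.current_node['id'],
        'position': context.current_position,
        'service_type': context.current_profile['service_type'],
        'provider_ptg': context.provider['id'] if context.provider else None,
        'consumer_is_external': context.is_consumer_external,
    }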

View File

@ -1,231 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class NodeDriverBase(object):
"""Node Driver Base class for Node Composition Plugin (NCP).
A Node Driver is the fundamental unit of the NCP service chain plugin.
It is invoked every time an operation has to be executed on Service Node
instances (e.g. services that are part of a deployed chain)
which the Node Driver is capable of deploying, destroying and updating.
The Node Driver may expose resource needs to the NCP plugin, which will
make sure that the NodeDriverContext is enriched with everything the
driver needs.
"""
@abc.abstractmethod
def initialize(self, name):
"""Perform driver initialization.
Called after all drivers have been loaded and the database has
been initialized. No abstract methods defined below will be
called prior to this method being called. Name is a unique attribute
that identifies the driver.
"""
pass
@abc.abstractmethod
def get_plumbing_info(self, context):
""" Tells the NCP Plugin which kind of plumbing is needed by the Node.
The plumbing info is defined as a collection of needed policy targets
on a specific role; this may vary based on the node
(obtained from the NodeDriverContext) that the specific driver is asked
to deploy. An example of plumbing info is the following:
{
"management": <list of updated PT body dicts, one for each needed>,
"provider": <list of updated PT body dicts, one for each needed>,
"consumer": <list of updated PT body dicts, one for each needed>
}
The role (key of the above dictionary) specifies on which "side" the
policy target has to exist. Depending on the kind of chaining, the
Neutron port could actually be placed somewhere else! The value
is a list of attributes intended to override the PT body. This could
be used, for example, for providing explicit Neutron Ports when the
driver requires it, or for establishing a naming convention for the PTs.
An empty dictionary is the most common case and indicates a basic PT
creation:
{
"management": [{}], # One PT needed in the management
"provider": [{}, {port_id: 'a'}], # Two PT needed in the provider
"consumer": [] # Zero PT needed in the consumer
}
"""
pass
@abc.abstractmethod
def validate_create(self, context):
"""Validate whether a SCN can be processed or not for creation.
This method is intended as an indicative measure of whether the NCP
plugin should use this specific driver for scheduling a given node.
A successful validation is a prerequisite but doesn't guarantee that
this driver will ultimately be chosen.
:param context: NodeDriverContext instance describing the service chain
and the specific node to be processed by this driver.
"""
pass
@abc.abstractmethod
def validate_update(self, context):
"""Validate whether a SCN can be processed or not.
This method will be called whenever a specific Node owned by this
driver needs to be updated. It should be used to verify whether the
Driver is capable of enforcing the update or not.
:param context: NodeDriverContext instance describing the service chain
and the specific node to be processed by this driver.
"""
pass
@abc.abstractmethod
def create(self, context):
"""Instantiate a Service Chain Node based on the chain context.
This method will be called at Service Chain instantiation time by the
NCP plugin. Every scheduled Node Driver will be assigned a Node of the
chain that has to be deployed based on the node definition and the
service chain context. The same driver could be called multiple times
on different nodes of the same chain.
The datapath is expected to work according to the user intent at the
end of the chain instantiation.
:param context: NodeDriverContext instance describing the service chain
and the specific node to be processed by this driver.
"""
pass
@abc.abstractmethod
def delete(self, context):
"""Destroy a deployed Service Chain Node.
This method will be called when a Service Chain Instance is destroyed
or in case of node rescheduling. The driver is expected to undeploy the
specific node and free the owned resources. Freeing the resources
created by the NCP plugin as a consequence of the plumbing_info
method belongs to the NCP plugin, and it is in charge of disposing
them if needed.
:param context: NodeDriverContext instance describing the service chain
and the specific node to be processed by this driver.
"""
pass
@abc.abstractmethod
def update(self, context):
"""Update a deployed Service Chain Node.
Some changes in the Service Chain Node could need modifications in all
its instances. This method will be used in order to synchronize the
service configuration with the user expectation.
The original node definition is provided in the context in order to
calculate the difference if needed.
:param context: NodeDriverContext instance describing the service chain
and the specific node to be processed by this driver.
"""
pass
@abc.abstractmethod
def update_policy_target_added(self, context, policy_target):
"""Update a deployed Service Chain Node on adding of a PT.
This method can be used for auto scaling some services whenever a
Policy Target is added to a relevant PTG.
:param context: NodeDriverContext instance describing the service chain
and the specific node to be processed by this driver.
:param policy_target: Dict representing a Policy Target.
"""
pass
@abc.abstractmethod
def update_policy_target_removed(self, context, policy_target):
"""Update a deployed Service Chain Node on removal of a PT.
This method can be used for auto scaling some services whenever a
Policy Target is removed from a relevant PTG.
:param context: NodeDriverContext instance describing the service chain
and the specific node to be processed by this driver.
:param policy_target: Dict representing a Policy Target.
"""
pass
@abc.abstractmethod
def update_node_consumer_ptg_added(self, context, policy_target_group):
"""Update a deployed Service Chain Node on addition of a consumer PTG.
This method can be used for auto scaling some services whenever a
Policy Target is added to a relevant PTG.
:param context: NodeDriverContext instance describing the service chain
and the specific node to be processed by this driver.
:param policy_target_group: Dict representing a Policy Target Group.
"""
pass
@abc.abstractmethod
def update_node_consumer_ptg_removed(self, context, policy_target_group):
"""Update a deployed Service Chain Node on removal of a consumer PTG.
This method can be used for auto scaling some services whenever a
Policy Target is removed from a relevant PTG.
:param context: NodeDriverContext instance describing the service chain
and the specific node to be processed by this driver.
:param policy_target_group: Dict representing a Policy Target Group.
"""
pass
@abc.abstractmethod
def policy_target_group_updated(self, context, old_policy_target_group,
current_policy_target_group):
"""Update a Node Driver that a chain provider PTG was created or
changed
:param context: NodeDriverContext instance describing the service chain
and the specific node to be processed by this driver.
:param old_policy_target_group: PTG state before the update. None if
the group was created.
:param current_policy_target_group: Current PTG state.
"""
pass
@abc.abstractmethod
def notify_chain_parameters_updated(self, context):
"""Update a deployed Service Chain Node on GBP PRS updates
This method can be used to inform the node driver that some parameter
that affects the service chain is updated. The update may be
something like adding or removing an Allow Rule to the ruleset and
this has to be enforced in the Firewall Service VM, or it could simply
be a classifier update.
:param context: NodeDriverContext instance describing the service chain
and the specific node to be processed by this driver.
"""
pass
@abc.abstractproperty
def name(self):
pass
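
For reference, the plumbing info structure documented in get_plumbing_info() above can be illustrated with a literal value; this is a hypothetical example, and the explicit port UUID is only a placeholder.

# Hypothetical plumbing info: one PT on the management side, two on the
# provider side (the second with an explicit Neutron port), none on the
# consumer side.
EXAMPLE_PLUMBING_INFO = {
    'management': [{}],
    'provider': [{}, {'port_id': 'a-preallocated-port-uuid'}],
    'consumer': [],
}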

View File

@ -1,66 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions used by NodeCompositionPlugin and drivers."""
from neutron_lib import exceptions
from gbpservice._i18n import _
class NodeDriverError(exceptions.NeutronException):
"""Node driver call failed."""
message = _("%(method)s failed.")
class NodeCompositionPluginException(exceptions.NeutronException):
"""Base for node driver exceptions returned to user."""
pass
class PlumbingException(exceptions.NeutronException):
"""Base for node driver exceptions returned to user."""
pass
class NodeCompositionPluginBadRequest(exceptions.BadRequest,
NodeCompositionPluginException):
"""Base for node driver bad request exceptions returned to user."""
pass
class OneSpecPerInstanceAllowed(NodeCompositionPluginBadRequest):
message = _("The Node Composition Plugin only supports one Servicechain"
"Spec per Servicechain Instance.")
class NoDriverAvailableForAction(NodeCompositionPluginBadRequest):
message = _("The Node Composition Plugin can't find any Node Driver "
"available for executing %(action)s on node %(node_id)s. "
"This may be caused by a Servicechain Node misconfiguration "
"or an unsupported Service Profile.")
class ServiceProfileInUseByAnInstance(NodeCompositionPluginBadRequest):
message = _("Cannot update Service Profile %(profile_id)s because it's "
"used by servicechain instance %(instance_id)s.")
class NotAvailablePTGForTargetRequest(PlumbingException):
message = _("PTG of type %(ptg_type)s doesn't exist for service chain "
"instance %(instance)s. However, it is required by the "
"scheduled Node Driver in order to deploy Node %(node)s")
class InuseSpecNodeUpdateNotAllowed(NodeCompositionPluginBadRequest):
message = _("The Node Composition Plugin does not support updating the "
"nodes in an instantiated servicechain spec.")

View File

@ -1,157 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import model_base
from oslo_log import log as logging
import sqlalchemy as sa
from gbpservice.neutron.db.grouppolicy import group_policy_db as gp_db
LOG = logging.getLogger(__name__)
PROVIDER = 'provider'
CONSUMER = 'consumer'
MANAGEMENT = 'management'
RELATIONSHIPS = [PROVIDER, CONSUMER, MANAGEMENT]
class NodeToDriverMapping(model_base.BASEV2):
"""Node to Driver mapping DB.
This table keeps track of the driver owning a specific SC Node based on
the SC instance
"""
__tablename__ = 'ncp_node_to_driver_mapping'
servicechain_node_id = sa.Column(sa.String(36),
sa.ForeignKey('sc_nodes.id',
ondelete='CASCADE'),
nullable=False, primary_key=True)
# Based on the extension name
driver_name = sa.Column(sa.String(36), nullable=False)
servicechain_instance_id = sa.Column(sa.String(36),
sa.ForeignKey('sc_instances.id',
ondelete='CASCADE'),
primary_key=True)
class ServiceTarget(model_base.BASEV2):
"""Service related policy targets.
Internal information regarding the policy targets owned by services.
"""
__tablename__ = 'ncp_service_targets'
policy_target_id = sa.Column(sa.String(36),
sa.ForeignKey(gp_db.PolicyTarget.id,
ondelete='CASCADE'),
nullable=False, primary_key=True)
# Not a FK to avoid constraint error on SCI delete
# keeping the DB entry is useful to identify uncleaned PTs
servicechain_instance_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
# Not a FK to avoid constraint error on SCN delete.
# keeping the DB entry is useful to identify uncleaned PTs
servicechain_node_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
# Defines on which "side" of the chain the PT is placed. typically
# its values can be "provider", "consumer" or "management"
relationship = sa.Column(sa.String(25), nullable=False)
position = sa.Column(sa.Integer)
def set_node_owner(context, driver_name):
session = context.session
with session.begin(subtransactions=True):
owner = NodeToDriverMapping(
servicechain_instance_id=context.instance['id'],
servicechain_node_id=context.current_node['id'],
driver_name=driver_name)
session.add(owner)
def get_node_owner(context):
session = context.session
with session.begin(subtransactions=True):
query = session.query(NodeToDriverMapping)
query = query.filter_by(
servicechain_instance_id=context.instance['id'])
query = query.filter_by(
servicechain_node_id=context.current_node['id'])
return query.all()
def unset_node_owner(context):
session = context.session
with session.begin(subtransactions=True):
query = session.query(NodeToDriverMapping)
query = query.filter_by(
servicechain_instance_id=context.instance['id'])
query = query.filter_by(
servicechain_node_id=context.current_node['id'])
for owner in query.all():
session.delete(owner)
def set_service_target(context, policy_target_id, relationship):
session = context.session
with session.begin(subtransactions=True):
owner = ServiceTarget(
policy_target_id=policy_target_id,
servicechain_instance_id=context.instance['id'],
servicechain_node_id=context.current_node['id'],
position=context.current_position,
relationship=relationship)
session.add(owner)
def get_service_targets(session, policy_target_id=None, relationship=None,
servicechain_instance_id=None, position=None,
servicechain_node_id=None):
with session.begin(subtransactions=True):
query = _prepare_service_target_query(
session, policy_target_id=policy_target_id,
relationship=relationship,
servicechain_instance_id=servicechain_instance_id,
position=position, servicechain_node_id=servicechain_node_id)
return query.all()
def get_service_targets_count(session, policy_target_id=None,
relationship=None, servicechain_instance_id=None,
position=None, servicechain_node_id=None):
with session.begin(subtransactions=True):
query = _prepare_service_target_query(
session, policy_target_id=policy_target_id,
relationship=relationship,
servicechain_instance_id=servicechain_instance_id,
position=position, servicechain_node_id=servicechain_node_id)
return query.count()
def _prepare_service_target_query(session, policy_target_id=None,
relationship=None,
servicechain_instance_id=None, position=None,
servicechain_node_id=None):
query = session.query(ServiceTarget)
if servicechain_instance_id:
query = query.filter_by(
servicechain_instance_id=servicechain_instance_id)
if servicechain_node_id:
query = query.filter_by(
servicechain_node_id=servicechain_node_id)
if policy_target_id:
query = query.filter_by(policy_target_id=policy_target_id)
if position:
query = query.filter_by(position=position)
if relationship:
query = query.filter_by(relationship=relationship)
return query
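
For reference, a minimal sketch of how a node driver would record and look up its policy targets with the helpers above; the helper name is hypothetical, and `context` is assumed to be the NodeDriverContext removed earlier in this change.

# Hypothetical helper: record a freshly created provider-side PT, then read
# back all provider PTs tracked for the current node.
def _track_provider_pt(context, policy_target_id):
    set_service_target(context, policy_target_id, PROVIDER)
    return get_service_targets(
        context.session,
        servicechain_instance_id=context.instance['id'],
        servicechain_node_id=context.current_node['id'],
        relationship=PROVIDER)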

View File

@ -1,121 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
import stevedore
from gbpservice.neutron.services.servicechain.plugins.ncp import config # noqa
from gbpservice.neutron.services.servicechain.plugins.ncp import model
LOG = logging.getLogger(__name__)
class NodeDriverManager(stevedore.named.NamedExtensionManager):
"""Route servicechain APIs to servicechain node drivers.
"""
def __init__(self):
# Registered node drivers, keyed by name.
self.drivers = {}
# Ordered list of node drivers.
self.ordered_drivers = []
names = cfg.CONF.node_composition_plugin.node_drivers
LOG.info("Configured service chain node driver names: %s", names)
super(NodeDriverManager,
self).__init__(
'gbpservice.neutron.servicechain.ncp_drivers', names,
invoke_on_load=True, name_order=True)
LOG.info(
"Loaded service chain node driver names: %s", self.names())
self._register_drivers()
def _register_drivers(self):
"""Register all service chain node drivers."""
for ext in self:
self.drivers[ext.name] = ext
self.ordered_drivers.append(ext)
LOG.info("Registered service chain node drivers: %s",
[driver.name for driver in self.ordered_drivers])
def initialize(self):
"""Initialize all the service chain node drivers."""
self.native_bulk_support = True
for driver in self.ordered_drivers:
LOG.info("Initializing service chain node drivers '%s'",
driver.name)
driver.obj.initialize(driver.name)
self.native_bulk_support &= getattr(driver.obj,
'native_bulk_support', True)
def schedule_deploy(self, context):
"""Schedule Node Driver for Node creation.
Given a NodeContext, this method returns the driver capable of creating
the specific node.
"""
for driver in self.ordered_drivers:
try:
driver.obj.validate_create(context)
if not model.get_node_owner(context):
model.set_node_owner(context, driver.obj.name)
return driver.obj
except n_exc.NeutronException as e:
LOG.warning(e.message)
def schedule_destroy(self, context):
"""Schedule Node Driver for Node disruption.
Given a NodeContext, this method returns the driver capable of
destroying the specific node.
"""
driver = self.get_owning_driver(context)
if driver:
model.unset_node_owner(context)
return driver
def schedule_update(self, context):
"""Schedule Node Driver for Node Update.
Given a NodeContext, this method returns the driver capable of updating
the specific node.
"""
driver = self.get_owning_driver(context)
if driver:
driver.validate_update(context)
return driver
def schedule_get(self, context):
"""Schedule Node Driver to get Node details.
Given a NodeContext, this method returns the driver capable of
getting details of the specific node.
"""
driver = self.get_owning_driver(context)
return driver
def clear_node_owner(self, context):
"""Remove Node Driver ownership set for a Node
Given a NodeContext, this method removes the Node owner mapping in DB.
This method is used when we want to perform a disruptive chain update
by deleting and recreating the Node instances
"""
model.unset_node_owner(context)
def get_owning_driver(self, context):
owner = model.get_node_owner(context)
if owner:
driver = self.drivers.get(owner[0].driver_name)
return driver.obj if driver else None
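
For reference, a minimal sketch of the deploy flow the NodeCompositionPlugin (shown further below) drives through this manager; the function name is hypothetical, and the import of NoDriverAvailableForAction from the removed exceptions module is assumed.

# Hypothetical deploy flow: pick an owning driver for the node, then ask it
# to create the node from the same context.
def _deploy_node(driver_manager, node_context):
    driver = driver_manager.schedule_deploy(node_context)
    if not driver:
        raise NoDriverAvailableForAction(
            action='deploy', node_id=node_context.current_node['id'])
    driver.create(node_context)
    return driver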

View File

@ -1,82 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log
from gbpservice.neutron.services.servicechain.plugins.ncp import driver_base
class NoopNodeDriver(driver_base.NodeDriverBase):
initialized = False
@log.log_method_call
def initialize(self, name):
self.initialized = True
self._name = name
@log.log_method_call
def get_plumbing_info(self, context):
pass
@log.log_method_call
def validate_create(self, context):
pass
@log.log_method_call
def validate_update(self, context):
pass
@log.log_method_call
def create(self, context):
pass
@log.log_method_call
def delete(self, context):
pass
@log.log_method_call
def update(self, context):
pass
@log.log_method_call
def update_policy_target_added(self, context, policy_target):
pass
@log.log_method_call
def update_policy_target_removed(self, context, policy_target):
pass
@log.log_method_call
def update_node_consumer_ptg_added(self, context, policy_target_group):
pass
@log.log_method_call
def update_node_consumer_ptg_removed(self, context, policy_target_group):
pass
@log.log_method_call
def notify_chain_parameters_updated(self, context):
pass
@log.log_method_call
def policy_target_group_updated(self, context, old_policy_target_group,
current_policy_target_group):
pass
@property
def name(self):
return self._name
@log.log_method_call
def get_status(self, context):
return {'status': '', 'status_details': ''}

View File

@ -1,524 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from heatclient import exc as heat_exc
from neutron.db import models_v2 as ndb
from neutron_lib.db import model_base
from neutron_lib.plugins import constants as pconst
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import helpers as log
from oslo_log import log as logging
from oslo_serialization import jsonutils
import sqlalchemy as sa
from gbpservice._i18n import _
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.services.servicechain.plugins.ncp import (
exceptions as exc)
from gbpservice.neutron.services.servicechain.plugins.ncp import driver_base
from gbpservice.neutron.services.servicechain.plugins.ncp.node_drivers import (
openstack_heat_api_client as heat_api_client)
LOG = logging.getLogger(__name__)
service_chain_opts = [
cfg.IntOpt('stack_action_wait_time',
default=15,
help=_("Seconds to wait for pending stack operation "
"to complete")),
cfg.IntOpt('delete_vip_port_retries',
default=10,
help=_("Retries to check if LB VIP port is deleted")),
cfg.StrOpt('heat_uri',
default='http://localhost:8004/v1',
help=_("Heat API server address to instantiate services "
"specified in the service chain.")),
cfg.StrOpt('exclude_pool_member_tag',
default='ExcludePoolMember',
help=_("Policy Targets created for the LB Pool Members should "
"have this tag in their description")),
]
cfg.CONF.register_opts(service_chain_opts, "heat_node_driver")
EXCLUDE_POOL_MEMBER_TAG = cfg.CONF.heat_node_driver.exclude_pool_member_tag
STACK_ACTION_WAIT_TIME = cfg.CONF.heat_node_driver.stack_action_wait_time
STACK_ACTION_RETRY_WAIT = 5 # Retry after every 5 seconds
DELETE_VIP_PORT_RETRIES = cfg.CONF.heat_node_driver.delete_vip_port_retries
class ServiceNodeInstanceStack(model_base.BASEV2):
"""ServiceChainInstance stacks owned by the Node driver."""
__tablename__ = 'ncp_node_instance_stacks'
sc_instance_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
sc_node_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
stack_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
class InvalidServiceType(exc.NodeCompositionPluginBadRequest):
message = _("The Heat Node driver only supports the services "
"Firewall and LB in a Service Chain")
class ServiceProfileRequired(exc.NodeCompositionPluginBadRequest):
message = _("A Service profile is required in Service node")
class NodeVendorMismatch(exc.NodeCompositionPluginBadRequest):
message = _("The Heat Node driver only handles nodes which have service "
"profile with vendor name %(vendor)s")
class ServiceConfigNotJsonString(exc.NodeCompositionPluginBadRequest):
message = _("Service config should be a json string for the Heat Node "
"driver")
class HeatTemplateVersionNotSupported(exc.NodeCompositionPluginBadRequest):
message = _("The Heat Node driver only supports AWS and HOT template "
"formats for service node config")
class ServiceResourceDefinitionsMissing(exc.NodeCompositionPluginBadRequest):
message = _("The Service template does not have service resources defined")
class HeatResourceMissing(exc.NodeCompositionPluginBadRequest):
message = _("The service template requires the Resource %(resource)s for "
"service type %(servicetype)s")
class ProfileUpdateNotSupported(exc.NodeCompositionPluginBadRequest):
message = _("The Heat Node driver does not allow updating the "
"service profile used by a Node")
class ServiceTypeUpdateNotSupported(exc.NodeCompositionPluginBadRequest):
message = _("The Heat Node driver does not allow updating the "
"service type used by a Node")
class HeatNodeDriver(driver_base.NodeDriverBase):
vendor_name = 'heat_based_node_driver'
initialized = False
sc_supported_type = [pconst.LOADBALANCERV2, pconst.FIREWALL]
required_heat_resources = {
pconst.LOADBALANCERV2: ['OS::Neutron::LBaaS::LoadBalancer',
'OS::Neutron::LBaaS::Listener',
'OS::Neutron::LBaaS::Pool'],
pconst.FIREWALL: ['OS::Neutron::Firewall',
'OS::Neutron::FirewallPolicy'],
}
@log.log_method_call
def initialize(self, name):
self.initialized = True
self._name = name
@log.log_method_call
def get_plumbing_info(self, context):
pass
@log.log_method_call
def validate_create(self, context):
if context.current_profile is None:
raise ServiceProfileRequired()
if context.current_profile['vendor'].lower() != (
self.vendor_name.lower()):
raise NodeVendorMismatch(vendor=self.vendor_name)
service_type = context.current_profile['service_type']
if service_type not in self.sc_supported_type:
raise InvalidServiceType()
self._validate_service_config(context.current_node['config'],
service_type)
@log.log_method_call
def validate_update(self, context):
if not context.original_node: # PT create/delete notifications
return
if context.current_profile != context.original_profile:
raise ProfileUpdateNotSupported()
if (context.current_node['service_type'] !=
context.original_node['service_type']):
raise ServiceTypeUpdateNotSupported()
else:
service_type = context.current_profile['service_type']
self._validate_service_config(context.current_node['config'],
service_type)
def _validate_service_config(self, service_template, service_type):
if not service_template:
raise ServiceResourceDefinitionsMissing()
try:
service_template = jsonutils.loads(service_template)
except Exception:
raise ServiceConfigNotJsonString()
if (not service_template.get('AWSTemplateFormatVersion') and
not service_template.get('heat_template_version')):
raise HeatTemplateVersionNotSupported()
is_template_aws_version = service_template.get(
'AWSTemplateFormatVersion', False)
resources_key = 'Resources' if is_template_aws_version else 'resources'
if not service_template.get(resources_key):
raise ServiceResourceDefinitionsMissing()
for resource_name in self.required_heat_resources[service_type]:
param_key = self._get_heat_resource_key(
service_template[resources_key],
is_template_aws_version,
resource_name)
if not param_key:
raise HeatResourceMissing(resource=resource_name,
servicetype=service_type)
@log.log_method_call
def create(self, context):
heatclient = self._get_heat_client(context.plugin_context)
stack_template, stack_params = self._fetch_template_and_params(context)
stack_name = ("stack_" + context.instance['name'] +
context.current_node['name'] +
context.instance['id'][:8] +
context.current_node['id'][:8])
# Heat does not accept space in stack name
stack_name = stack_name.replace(" ", "")
stack = heatclient.create(stack_name, stack_template, stack_params)
self._insert_node_instance_stack_in_db(
context.plugin_session, context.current_node['id'],
context.instance['id'], stack['stack']['id'])
@log.log_method_call
def delete(self, context):
stack_ids = self._get_node_instance_stacks(context.plugin_session,
context.current_node['id'],
context.instance['id'])
heatclient = self._get_heat_client(context.plugin_context)
for stack in stack_ids:
vip_port_id = None
try:
rstr = heatclient.client.resources.get(stack_ids[0].stack_id,
'loadbalancer')
vip_port_id = rstr.attributes['vip_port_id']
except heat_exc.HTTPNotFound:
# stack not found, so no need to process any further
pass
heatclient.delete(stack.stack_id)
if vip_port_id:
for x in range(0, DELETE_VIP_PORT_RETRIES):
# We intentionally get a new session so as to be
# able to read the updated DB
session = db_api.get_reader_session()
vip_port = session.query(ndb.Port).filter_by(
id=vip_port_id).all()
if vip_port:
# heat stack delete is not finished yet, so try again
LOG.debug(("VIP port %s is not yet deleted"), vip_port)
LOG.debug(("Retry attempt; %s"), x + 1)
# Stack delete will at least take some minimal amount
# of time, hence we wait a little bit.
time.sleep(STACK_ACTION_WAIT_TIME)
else:
# we force a retry so that a new session can be
# used that will correctly reflect the VIP port as
# deleted and hence allow the subsequent policy driver
# to delete the VIP subnet
raise db_exc.RetryRequest(Exception)
self._delete_node_instance_stack_in_db(context.plugin_session,
context.current_node['id'],
context.instance['id'])
@log.log_method_call
def update(self, context):
heatclient = self._get_heat_client(context.plugin_context)
stack_template, stack_params = self._fetch_template_and_params(context)
stack_ids = self._get_node_instance_stacks(context.plugin_session,
context.current_node['id'],
context.instance['id'])
for stack in stack_ids:
self._wait_for_stack_operation_complete(
heatclient, stack.stack_id, 'update')
heatclient.update(stack.stack_id, stack_template, stack_params)
@log.log_method_call
def update_policy_target_added(self, context, policy_target):
if context.current_profile['service_type'] == pconst.LOADBALANCERV2:
self.update(context)
@log.log_method_call
def update_policy_target_removed(self, context, policy_target):
if context.current_profile['service_type'] == pconst.LOADBALANCERV2:
self.update(context)
@log.log_method_call
def update_node_consumer_ptg_added(self, context, policy_target_group):
pass
@log.log_method_call
def update_node_consumer_ptg_removed(self, context, policy_target_group):
pass
@log.log_method_call
def notify_chain_parameters_updated(self, context):
self.update(context)
@log.log_method_call
def policy_target_group_updated(self, context, old_policy_target_group,
current_policy_target_group):
pass
def get_status(self, context):
# TODO(Sumit): Needs to be implemented
return {'status': '', 'status_details': ''}
@property
def name(self):
return self._name
def _get_heat_client(self, plugin_context):
return heat_api_client.HeatClient(
plugin_context,
cfg.CONF.heat_node_driver.heat_uri)
def _fetch_template_and_params(self, context):
sc_instance = context.instance
provider_ptg = context.provider
# TODO(Magesh): Handle multiple subnets
provider_ptg_subnet_id = provider_ptg['subnets'][0]
consumer = context.consumer
service_type = context.current_profile['service_type']
stack_template = context.current_node.get('config')
stack_template = jsonutils.loads(stack_template)
config_param_values = sc_instance.get('config_param_values', {})
stack_params = {}
if config_param_values:
config_param_values = jsonutils.loads(config_param_values)
is_template_aws_version = stack_template.get(
'AWSTemplateFormatVersion', False)
if service_type == pconst.LOADBALANCERV2:
self._generate_pool_members(context, stack_template,
config_param_values,
provider_ptg,
is_template_aws_version)
elif service_type == pconst.FIREWALL:
provider_subnet = context.core_plugin.get_subnet(
context.plugin_context, provider_ptg_subnet_id)
consumer_cidrs = []
if consumer:
if context.is_consumer_external:
# REVISIT(Magesh): Allowing the first destination which is
# 0/0 Validate and skip adding FW rule in case routes is
# not set
es = context.gbp_plugin.get_external_segment(
context.plugin_context,
consumer['external_segments'][0])
consumer_cidrs = [x['destination']
for x in es['external_routes']]
else:
consumer_subnet = context.core_plugin.get_subnet(
context._plugin_context, consumer['subnets'][0])
consumer_cidrs = [consumer_subnet['cidr']]
provider_cidr = provider_subnet['cidr']
self._update_template_with_firewall_rules(
context, provider_ptg, provider_cidr, consumer_cidrs,
stack_template, is_template_aws_version)
node_params = (stack_template.get('Parameters') or
stack_template.get('parameters') or [])
for parameter in node_params:
if parameter == "Subnet":
stack_params[parameter] = provider_ptg_subnet_id
elif parameter == "service_chain_metadata":
stack_params[parameter] = sc_instance['id']
elif parameter in config_param_values:
stack_params[parameter] = config_param_values[parameter]
return (stack_template, stack_params)
def _wait_for_stack_operation_complete(self, heatclient, stack_id, action):
time_waited = 0
while True:
try:
stack = heatclient.get(stack_id)
if stack.stack_status == 'DELETE_FAILED':
heatclient.delete(stack_id)
elif stack.stack_status not in ['UPDATE_IN_PROGRESS',
'DELETE_IN_PROGRESS']:
return
except Exception:
LOG.exception("Retrieving the stack %(stack)s failed.",
{'stack': stack_id})
return
else:
time.sleep(STACK_ACTION_RETRY_WAIT)
time_waited = time_waited + STACK_ACTION_RETRY_WAIT
if time_waited >= STACK_ACTION_WAIT_TIME:
LOG.error("Stack %(action)s not completed within "
"%(wait)s seconds",
{'action': action,
'wait': STACK_ACTION_WAIT_TIME,
'stack': stack_id})
return
def _delete_node_instance_stack_in_db(self, session, sc_node_id,
sc_instance_id):
with session.begin(subtransactions=True):
stacks = (session.query(ServiceNodeInstanceStack).
filter_by(sc_node_id=sc_node_id).
filter_by(sc_instance_id=sc_instance_id).
all())
for stack in stacks:
session.delete(stack)
def _insert_node_instance_stack_in_db(self, session, sc_node_id,
sc_instance_id, stack_id):
with session.begin(subtransactions=True):
chainstack = ServiceNodeInstanceStack(
sc_node_id=sc_node_id,
sc_instance_id=sc_instance_id,
stack_id=stack_id)
session.add(chainstack)
def _get_node_instance_stacks(self, session, sc_node_id=None,
sc_instance_id=None):
with session.begin(subtransactions=True):
query = session.query(ServiceNodeInstanceStack)
if sc_node_id:
query = query.filter_by(sc_node_id=sc_node_id)
if sc_instance_id:
query = query.filter_by(sc_instance_id=sc_instance_id)
return query.all()
def _update_template_with_firewall_rules(self, context, provider_ptg,
provider_cidr, consumer_cidrs,
stack_template,
is_template_aws_version):
resources_key = ('Resources' if is_template_aws_version
else 'resources')
properties_key = ('Properties' if is_template_aws_version
else 'properties')
ref_key = 'Ref' if is_template_aws_version else 'get_resource'
rule_num = 1
rule_list = []
for consumer_cidr in consumer_cidrs:
rule_name = "Rule_" + str(rule_num)
rule_num = rule_num + 1
stack_template[resources_key][rule_name] = (
self._generate_firewall_rule(
is_template_aws_version, context.classifier["protocol"],
context.classifier["port_range"],
provider_cidr, consumer_cidr))
rule_list.append({ref_key: rule_name})
resource_name = 'OS::Neutron::FirewallPolicy'
fw_policy_key = self._get_heat_resource_key(
stack_template[resources_key],
is_template_aws_version,
resource_name)
stack_template[resources_key][fw_policy_key][properties_key][
'firewall_rules'] = rule_list
def _generate_firewall_rule(self, is_template_aws_version, protocol,
destination_port, destination_cidr,
source_cidr):
type_key = 'Type' if is_template_aws_version else 'type'
properties_key = ('Properties' if is_template_aws_version
else 'properties')
return {type_key: "OS::Neutron::FirewallRule",
properties_key: {
"protocol": protocol,
"enabled": True,
"destination_port": destination_port,
"action": "allow",
"destination_ip_address": destination_cidr,
"source_ip_address": source_cidr}}
def _generate_pool_members(self, context, stack_template,
config_param_values, provider_ptg,
is_template_aws_version):
resources_key = 'Resources' if is_template_aws_version else 'resources'
type_key = 'Type' if is_template_aws_version else 'type'
member_ips = self._get_member_ips(context, provider_ptg)
if not member_ips:
return
pool_res_name = None
for resource in stack_template[resources_key]:
if stack_template[resources_key][resource][type_key] == (
'OS::Neutron::LBaaS::Pool'):
pool_res_name = resource
break
for member_ip in member_ips:
member_name = 'mem-' + member_ip
stack_template[resources_key][member_name] = (
self._generate_pool_member_template(
context, is_template_aws_version,
pool_res_name, member_ip))
def _generate_pool_member_template(self, context,
is_template_aws_version,
pool_res_name, member_ip):
type_key = 'Type' if is_template_aws_version else 'type'
properties_key = ('Properties' if is_template_aws_version
else 'properties')
res_key = 'Ref' if is_template_aws_version else 'get_resource'
return {type_key: "OS::Neutron::LBaaS::PoolMember",
properties_key: {
"address": member_ip,
"admin_state_up": True,
"pool": {res_key: pool_res_name},
"protocol_port": {'get_param': 'app_port'},
"subnet": {'get_param': 'Subnet'},
"weight": 1}}
def _get_member_ips(self, context, ptg):
member_addresses = []
policy_targets = context.gbp_plugin.get_policy_targets(
context.plugin_context,
filters={'id': ptg.get("policy_targets")})
for policy_target in policy_targets:
if EXCLUDE_POOL_MEMBER_TAG not in policy_target['description']:
port_id = policy_target.get("port_id")
if port_id:
port = context.core_plugin.get_port(
context._plugin_context, port_id)
ip = port.get('fixed_ips')[0].get("ip_address")
member_addresses.append(ip)
return member_addresses
def _get_heat_resource_key(self, template_resource_dict,
is_template_aws_version, resource_name):
type_key = 'Type' if is_template_aws_version else 'type'
for key in template_resource_dict:
if template_resource_dict[key].get(type_key) == resource_name:
return key
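
For reference, a minimal sketch of a node config that would pass _validate_service_config() above for a FIREWALL node: it declares a HOT template version and both required resource types. The resource names and property values are hypothetical placeholders.

# Hypothetical minimal HOT config for a firewall node, serialized to the
# JSON string expected in the servicechain node's 'config' attribute.
from oslo_serialization import jsonutils

MINIMAL_FW_NODE_CONFIG = jsonutils.dumps({
    'heat_template_version': '2015-10-15',
    'resources': {
        'fw_policy': {
            'type': 'OS::Neutron::FirewallPolicy',
            'properties': {'firewall_rules': []},
        },
        'fw': {
            'type': 'OS::Neutron::Firewall',
            'properties': {
                'firewall_policy_id': {'get_resource': 'fw_policy'},
            },
        },
    },
})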

View File

@ -1,174 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from eventlet import greenpool
from neutron_lib.db import model_base
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import constants as pconst
from oslo_config import cfg
from oslo_log import log as logging
import sqlalchemy as sa
from gbpservice._i18n import _
from gbpservice.neutron.services.servicechain.plugins.ncp import (
exceptions as exc)
from gbpservice.nfp.common import constants as nfp_constants
NFP_NODE_DRIVER_OPTS = [
cfg.BoolOpt('is_service_admin_owned',
help=_("Parameter to indicate whether the Service VM has to "
"be owned by the Admin"),
default=False),
cfg.IntOpt('service_create_timeout',
default=nfp_constants.SERVICE_CREATE_TIMEOUT,
help=_("Seconds to wait for service creation "
"to complete")),
cfg.IntOpt('service_delete_timeout',
default=nfp_constants.SERVICE_DELETE_TIMEOUT,
help=_("Seconds to wait for service deletion "
"to complete")),
]
# REVISIT(ashu): Can we use is_service_admin_owned config from RMD
cfg.CONF.register_opts(NFP_NODE_DRIVER_OPTS, "nfp_node_driver")
LOG = logging.getLogger(__name__)
# REVISIT: L2 insertion not supported
GATEWAY_PLUMBER_TYPE = [pconst.FIREWALL, pconst.VPN]
nfp_context_store = threading.local()
class InvalidServiceType(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver only supports the services "
"VPN, Firewall and LB in a Service Chain")
class ServiceProfileRequired(exc.NodeCompositionPluginBadRequest):
message = _("A Service profile is required in Service node")
class NodeVendorMismatch(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver only handles nodes which have service "
"profile with vendor name %(vendor)s")
class DuplicateServiceTypeInChain(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver does not support duplicate "
"service types in same chain")
class RequiredProfileAttributesNotSet(exc.NodeCompositionPluginBadRequest):
message = _("The required attributes in service profile are not present")
class InvalidNodeOrderInChain(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver does not support the order "
"of nodes defined in the current service chain spec, "
"order should be : %(node_order)s")
class UnSupportedServiceProfile(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver does not support this service "
"profile with service type %(service_type)s and vendor "
"%(vendor)s")
class UnSupportedInsertionMode(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver supports only L3 Insertion "
"mode")
class ServiceInfoNotAvailableOnUpdate(n_exc.NeutronException):
message = _("Service information is not available with Service Manager "
"on node update")
class VipNspNotSetonProvider(n_exc.NeutronException):
message = _("Network Service policy for VIP IP address is not configured "
"on the Providing Group")
class NodeInstanceDeleteFailed(n_exc.NeutronException):
message = _("Node instance delete failed in NFP Node driver")
class NodeInstanceCreateFailed(n_exc.NeutronException):
message = _("Node instance create failed in NFP Node driver")
class NodeInstanceUpdateFailed(n_exc.NeutronException):
message = _("Node instance update failed in NFP Node driver")
class OperationNotSupported(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver doesn't support operation, "
"if instance status is in BUILD state.")
class ServiceNodeInstanceNetworkFunctionMapping(model_base.BASEV2):
"""ServiceChainInstance to NFP network function mapping."""
__tablename__ = 'ncp_node_instance_network_function_mappings'
sc_instance_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
sc_node_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
network_function_id = sa.Column(sa.String(36),
nullable=True)
status = sa.Column(sa.String(20), nullable=True)
status_details = sa.Column(sa.String(4096), nullable=True)
class NFPContext(object):
@staticmethod
def store_nfp_context(sc_instance_id, **context):
if not hasattr(nfp_context_store, 'context'):
nfp_context_store.context = {}
# Considering each store request comes with one entry
if not nfp_context_store.context.get(sc_instance_id):
NFPContext._initialise_attr(sc_instance_id)
nfp_context_store.context[sc_instance_id].update(context)
@staticmethod
def clear_nfp_context(sc_instance_id):
if not hasattr(nfp_context_store, 'context'):
return
if nfp_context_store.context.get(sc_instance_id):
del nfp_context_store.context[sc_instance_id]
@staticmethod
def get_nfp_context(sc_instance_id):
if not hasattr(nfp_context_store, 'context'):
return {}
if nfp_context_store.context.get(sc_instance_id):
return nfp_context_store.context[sc_instance_id]
return {}
@staticmethod
def _initialise_attr(sc_instance_id):
context = {'thread_pool': greenpool.GreenPool(10),
'active_threads': [],
'sc_node_count': 0,
'sc_gateway_type_nodes': [],
'network_functions': [],
'update': False}
if nfp_context_store.context:
nfp_context_store.context.update({sc_instance_id: context})
else:
nfp_context_store.context = {sc_instance_id: context}
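
For reference, a minimal sketch of a round trip through the thread-local NFPContext store above; the function name and instance id are hypothetical.

# Hypothetical usage: stash the node count for an instance, read it back,
# then clear the entry.
def _example_nfp_context_usage(sc_instance_id='hypothetical-instance-id'):
    NFPContext.store_nfp_context(sc_instance_id, sc_node_count=2)
    node_count = NFPContext.get_nfp_context(sc_instance_id)['sc_node_count']
    NFPContext.clear_nfp_context(sc_instance_id)
    return node_count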

View File

@ -1,63 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient import client as heat_client
from heatclient import exc as heat_exc
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class HeatClient(object):
def __init__(self, context, heat_uri, password=None,
auth_token=None):
api_version = "1"
endpoint = "%s/%s" % (heat_uri, context.tenant)
kwargs = {
'token': auth_token or context.auth_token,
'username': context.user_name,
'password': password
}
self.client = heat_client.Client(api_version, endpoint, **kwargs)
self.stacks = self.client.stacks
def create(self, name, data, parameters=None):
fields = {
'stack_name': name,
'timeout_mins': 30,
'disable_rollback': True,
'password': data.get('password')
}
fields['template'] = data
fields['parameters'] = parameters
return self.stacks.create(**fields)
def update(self, stack_id, data, parameters=None):
fields = {
'password': data.get('password')
}
fields['template'] = data
fields['parameters'] = parameters
return self.stacks.update(stack_id, **fields)
def delete(self, stack_id):
try:
self.stacks.delete(stack_id)
except heat_exc.HTTPNotFound:
LOG.warning(
"Stack %(stack)s created by service chain driver is "
"not found at cleanup", {'stack': stack_id})
def get(self, stack_id):
return self.stacks.get(stack_id)
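
For reference, a minimal sketch of how the removed Heat node driver used this thin wrapper; plugin_context, the template dict and the parameters are assumptions, and heat_uri is expected to come from the heat_node_driver.heat_uri option registered above.

# Hypothetical create-then-poll usage of the HeatClient wrapper.
def _example_stack_create(plugin_context, heat_uri, template, parameters):
    client = HeatClient(plugin_context, heat_uri)
    stack = client.create('example_stack', template, parameters)
    stack_id = stack['stack']['id']
    # Poll once; the removed driver loops with a configurable wait time.
    return client.get(stack_id).stack_status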

View File

@ -1,40 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# An Endpoint needs to be directly reachable by the consumers;
# it is basically a traditional PT presented in the form of a service.
# This kind of service is typically useful only when directly addressed, and
# is irrelevant to the traffic course otherwise. The Endpoint Services
# typically get a VIP on the provider subnet.
# Example Services: L4-7 Load Balancer (Reverse Proxy)
PLUMBING_TYPE_ENDPOINT = 'endpoint'
# A gateway service is a router that the PTs will use for reaching certain
# (or all the) destinations. This kind of service usually works on the packets
# that it's entitled to route, never modifying the Source IP Address.
# Traffic can indeed be dropped, inspected or otherwise manipulated by this
# kind of service.
# Example Services: Router, Firewall, Transport-mode VPN
PLUMBING_TYPE_GATEWAY = 'gateway'
# Rationale: A transparent service is either an L2 or a BITW
# (bump-in-the-wire) service.
# This kind of service usually has 2 logical data interfaces, and everything
# that is received on either of them is pushed onto the other after processing.
# The 2 interfaces typically exist in the same subnet, so traffic is not routed
# but switched (or simply mirrored) instead.
# Example Services: Transparent FW, IDS, IPS, Accounting, Traffic Shaping
PLUMBING_TYPE_TRANSPARENT = 'transparent'
PLUMBING_TYPES = [PLUMBING_TYPE_ENDPOINT,
PLUMBING_TYPE_GATEWAY,
PLUMBING_TYPE_TRANSPARENT]
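
For reference, a hedged, hypothetical sketch of validating a plumbing type against the constants above; nothing in this file defines such a helper.

# Hypothetical validation helper: reject a plumbing type string that is not
# one of the three constants defined above.
def validate_plumbing_type(plumbing_type):
    if plumbing_type not in PLUMBING_TYPES:
        raise ValueError("unknown plumbing type: %s" % plumbing_type)
    return plumbing_type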

View File

@ -1,32 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log
from gbpservice.neutron.services.servicechain.plugins.ncp import plumber_base
class NoopPlumber(plumber_base.NodePlumberBase):
initialized = False
@log.log_method_call
def initialize(self):
self.initialized = True
@log.log_method_call
def plug_services(self, context, deployment):
self._sort_deployment(deployment)
@log.log_method_call
def unplug_services(self, context, deployment):
self._sort_deployment(deployment)

View File

@ -1,599 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.quota import resource_registry
from neutron_lib.plugins import constants as pconst
from oslo_config import cfg
from oslo_log import helpers as log
from oslo_log import log as logging
from oslo_utils import excutils
from gbpservice.common import utils
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.db import servicechain_db
from gbpservice.neutron.services.grouppolicy.common import constants as gp_cts
from gbpservice.neutron.services.grouppolicy.common import utils as gutils
from gbpservice.neutron.services.servicechain.plugins.ncp import (
context as ctx)
from gbpservice.neutron.services.servicechain.plugins.ncp import (
exceptions as exc)
from gbpservice.neutron.services.servicechain.plugins.ncp import (
node_driver_manager as manager)
from gbpservice.neutron.services.servicechain.plugins import sharing
LOG = logging.getLogger(__name__)
PLUMBER_NAMESPACE = 'gbpservice.neutron.servicechain.ncp_plumbers'
cfg.CONF.import_opt('policy_drivers',
'gbpservice.neutron.services.grouppolicy.config',
group='group_policy')
STATUS = 'status'
STATUS_DETAILS = 'status_details'
STATUS_SET = set([STATUS, STATUS_DETAILS])
class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
sharing.SharingMixin):
"""Implementation of the Service Chain Plugin.
"""
supported_extension_aliases = ["servicechain"]
path_prefix = gp_cts.GBP_PREFIXES[pconst.SERVICECHAIN]
@resource_registry.tracked_resources(
servicechain_node=servicechain_db.ServiceChainNode,
servicechain_spec=servicechain_db.ServiceChainSpec,
servicechain_instance=servicechain_db.ServiceChainInstance,
service_profile=servicechain_db.ServiceProfile)
def __init__(self):
self.driver_manager = manager.NodeDriverManager()
super(NodeCompositionPlugin, self).__init__()
self.driver_manager.initialize()
plumber_klass = cfg.CONF.node_composition_plugin.node_plumber
self.plumber = utils.load_plugin(
PLUMBER_NAMESPACE, plumber_klass)
self.plumber.initialize()
LOG.info("Initialized node plumber '%s'", plumber_klass)
@log.log_method_call
def create_servicechain_instance(self, context, servicechain_instance):
"""Instance created.
When a Servicechain Instance is created, all its nodes need to be
instantiated.
"""
instance = self._process_commit_phase(context)
if instance:
return instance
deployers = {}
# REVISIT: Consider adding ensure_tenant() call here
with db_api.CONTEXT_WRITER.using(context):
instance = super(NodeCompositionPlugin,
self).create_servicechain_instance(
context, servicechain_instance)
if len(instance['servicechain_specs']) > 1:
raise exc.OneSpecPerInstanceAllowed()
deployers = self._get_scheduled_drivers(context, instance,
'deploy')
if not gutils.is_precommit_policy_driver_configured():
# Actual node deploy
try:
self._deploy_servicechain_nodes(context, deployers)
except Exception:
# Some node could not be deployed
with excutils.save_and_reraise_exception():
LOG.error("Node deployment failed, "
"deleting servicechain_instance %s",
instance['id'])
self.delete_servicechain_instance(context, instance['id'])
return instance
def _process_commit_phase(self, context):
if hasattr(context, 'commit_phase'):
if not gutils.is_precommit_policy_driver_configured() and (
context.commit_phase == gp_cts.PRE_COMMIT):
# The following is a bit of a hack to no-op
# the call from the postcommit policy driver
# during the pre-commit phase.
return True
if gutils.is_precommit_policy_driver_configured() and (
context.commit_phase == gp_cts.POST_COMMIT):
instance = self.get_servicechain_instance(
context, context.servicechain_instance['id'])
self._call_deploy_sc_node(context, instance)
return instance
def _call_deploy_sc_node(self, context, instance):
# Actual node deploy
try:
deployers = self._get_scheduled_drivers(
context, instance, 'deploy')
self._deploy_servicechain_nodes(context, deployers)
except Exception:
# Some node could not be deployed
with excutils.save_and_reraise_exception():
LOG.error("Node deployment failed, "
"servicechain_instance %s is in ERROR state",
instance['id'])
@log.log_method_call
def get_servicechain_instance(self, context,
servicechain_instance_id, fields=None):
"""Instance retrieved.
        When retrieving Servicechain Instance details, also retrieve the
        details of all its nodes.
"""
return self._get_resource(context, 'servicechain_instance',
servicechain_instance_id, fields)
@log.log_method_call
def update_servicechain_instance(self, context, servicechain_instance_id,
servicechain_instance):
"""Instance updated.
        When a Servicechain Instance is updated and its spec has changed, all
        the nodes of the previous spec are destroyed and the nodes of the new
        spec are created.
"""
instance = self._process_commit_phase(context)
if instance:
return instance
deployers = {}
updaters = {}
destroyers = {}
with db_api.CONTEXT_WRITER.using(context):
original_instance = self.get_servicechain_instance(
context, servicechain_instance_id)
updated_instance = super(
NodeCompositionPlugin, self).update_servicechain_instance(
context, servicechain_instance_id, servicechain_instance)
if (original_instance['servicechain_specs'] !=
updated_instance['servicechain_specs']):
if len(updated_instance['servicechain_specs']) > 1:
raise exc.OneSpecPerInstanceAllowed()
destroyers = self._get_scheduled_drivers(
context, original_instance, 'destroy')
else: # Could be classifier update
updaters = self._get_scheduled_drivers(
context, original_instance, 'update')
if (original_instance['servicechain_specs'] !=
updated_instance['servicechain_specs']):
self._destroy_servicechain_nodes(context, destroyers)
deployers = self._get_scheduled_drivers(
context, updated_instance, 'deploy')
context.deployers = deployers
context.servicechain_instance = updated_instance
if not gutils.is_precommit_policy_driver_configured():
self._deploy_servicechain_nodes(context, deployers)
else:
self._update_servicechain_nodes(context, updaters)
return updated_instance
@log.log_method_call
def delete_servicechain_instance(self, context, servicechain_instance_id):
"""Instance deleted.
When a Servicechain Instance is deleted, all its nodes need to be
destroyed.
"""
with db_api.CONTEXT_WRITER.using(context):
instance = self.get_servicechain_instance(context,
servicechain_instance_id)
destroyers = self._get_scheduled_drivers(context, instance,
'destroy')
self._destroy_servicechain_nodes(context, destroyers)
with db_api.CONTEXT_WRITER.using(context):
super(NodeCompositionPlugin, self).delete_servicechain_instance(
context, servicechain_instance_id)
@log.log_method_call
def create_servicechain_node(self, context, servicechain_node):
# REVISIT: Consider adding ensure_tenant() call here
with db_api.CONTEXT_WRITER.using(context):
result = super(NodeCompositionPlugin,
self).create_servicechain_node(context,
servicechain_node)
self._validate_shared_create(context, result, 'servicechain_node')
return result
@log.log_method_call
def update_servicechain_node(self, context, servicechain_node_id,
servicechain_node):
"""Node Update.
When a Servicechain Node is updated, all the corresponding instances
need to be updated as well. This usually results in a node
reconfiguration.
"""
updaters = {}
with db_api.CONTEXT_WRITER.using(context):
original_sc_node = self.get_servicechain_node(
context, servicechain_node_id)
updated_sc_node = super(NodeCompositionPlugin,
self).update_servicechain_node(
context, servicechain_node_id,
servicechain_node)
self._validate_shared_update(context, original_sc_node,
updated_sc_node, 'servicechain_node')
instances = self._get_node_instances(context, updated_sc_node)
for instance in instances:
node_context = ctx.get_node_driver_context(
self, context, instance, updated_sc_node, original_sc_node)
# TODO(ivar): Validate that the node driver understands the
# update.
driver = self.driver_manager.schedule_update(node_context)
if not driver:
raise exc.NoDriverAvailableForAction(
action='update', node_id=original_sc_node['id'])
updaters[instance['id']] = {}
updaters[instance['id']]['context'] = node_context
updaters[instance['id']]['driver'] = driver
updaters[instance['id']]['plumbing_info'] = (
driver.get_plumbing_info(node_context))
# Update the nodes
for update in list(updaters.values()):
try:
update['driver'].update(update['context'])
except exc.NodeDriverError as ex:
LOG.error("Node Update failed, %s",
ex.message)
return updated_sc_node
@log.log_method_call
def get_servicechain_node(
self, context, servicechain_node_id, fields=None):
return self._get_resource(context, 'servicechain_node',
servicechain_node_id, fields)
@log.log_method_call
def create_servicechain_spec(self, context, servicechain_spec):
# REVISIT: Consider adding ensure_tenant() call here
with db_api.CONTEXT_WRITER.using(context):
result = super(
NodeCompositionPlugin, self).create_servicechain_spec(
context, servicechain_spec, set_params=False)
self._validate_shared_create(context, result, 'servicechain_spec')
return result
@log.log_method_call
def update_servicechain_spec(self, context, servicechain_spec_id,
servicechain_spec):
with db_api.CONTEXT_WRITER.using(context):
original_sc_spec = self.get_servicechain_spec(
context, servicechain_spec_id)
updated_sc_spec = super(NodeCompositionPlugin,
self).update_servicechain_spec(
context, servicechain_spec_id,
servicechain_spec, set_params=False)
self._validate_shared_update(context, original_sc_spec,
updated_sc_spec, 'servicechain_spec')
# The reference plumber does not support modifying or reordering of
# nodes in a service chain spec. Disallow update for now
if (original_sc_spec['nodes'] != updated_sc_spec['nodes'] and
original_sc_spec['instances']):
raise exc.InuseSpecNodeUpdateNotAllowed()
return updated_sc_spec
@log.log_method_call
def get_servicechain_spec(self, context,
servicechain_spec_id, fields=None):
return self._get_resource(context, 'servicechain_spec',
servicechain_spec_id, fields)
@log.log_method_call
def create_service_profile(self, context, service_profile):
# REVISIT: Consider adding ensure_tenant() call here
with db_api.CONTEXT_WRITER.using(context):
result = super(
NodeCompositionPlugin, self).create_service_profile(
context, service_profile)
self._validate_shared_create(context, result, 'service_profile')
return result
@log.log_method_call
def update_service_profile(self, context, service_profile_id,
service_profile):
with db_api.CONTEXT_WRITER.using(context):
original_profile = self.get_service_profile(
context, service_profile_id)
updated_profile = super(NodeCompositionPlugin,
self).update_service_profile(
context, service_profile_id,
service_profile)
self._validate_profile_update(context, original_profile,
updated_profile)
return updated_profile
@log.log_method_call
def get_service_profile(self, context, service_profile_id, fields=None):
return self._get_resource(context, 'service_profile',
service_profile_id, fields)
def update_chains_pt_added(self, context, policy_target, instance_id):
""" Auto scaling function.
Notify the correct set of node drivers that a new policy target has
been added to a relevant PTG.
"""
self._update_chains_pt_modified(context, policy_target, instance_id,
'added')
def update_chains_pt_removed(self, context, policy_target, instance_id):
""" Auto scaling function.
        Notify the correct set of node drivers that a policy target has
been removed from a relevant PTG.
"""
self._update_chains_pt_modified(context, policy_target, instance_id,
'removed')
def update_chains_consumer_added(self, context, policy_target_group,
instance_id):
""" Auto scaling function.
Override this method to react to policy target group addition as
a consumer of a chain.
"""
self._update_chains_consumer_modified(context, policy_target_group,
instance_id, 'added')
def policy_target_group_updated(self, context, old_policy_target_group,
current_policy_target_group,
instance_id):
""" Utility function.
        Override this method to react to a policy target group update.
"""
self._policy_target_group_updated(context,
old_policy_target_group,
current_policy_target_group,
instance_id)
def update_chains_consumer_removed(self, context, policy_target_group,
instance_id):
""" Auto scaling function.
        Override this method to react to a policy target group being removed
        as a consumer of a chain.
"""
self._update_chains_consumer_modified(context, policy_target_group,
instance_id, 'removed')
def _policy_target_group_updated(self, context, old_policy_target_group,
current_policy_target_group,
instance_id):
updaters = self._get_scheduled_drivers(
context,
self.get_servicechain_instance(context, instance_id),
'update')
for update in list(updaters.values()):
try:
update['driver'].policy_target_group_updated(
update['context'],
old_policy_target_group,
current_policy_target_group)
except exc.NodeDriverError as ex:
LOG.error("Node Update on policy target group modification"
" failed, %s", ex.message)
def _update_chains_pt_modified(self, context, policy_target, instance_id,
action):
updaters = self._get_scheduled_drivers(
context, self.get_servicechain_instance(context, instance_id),
'update')
for update in list(updaters.values()):
try:
getattr(update['driver'],
'update_policy_target_' + action)(
update['context'], policy_target)
except exc.NodeDriverError as ex:
LOG.error("Node Update on policy target modification "
"failed, %s", ex.message)
def _update_chains_consumer_modified(self, context, policy_target_group,
instance_id, action):
updaters = self._get_scheduled_drivers(
context, self.get_servicechain_instance(context, instance_id),
'update')
for update in list(updaters.values()):
try:
getattr(update['driver'],
'update_node_consumer_ptg_' + action)(
update['context'], policy_target_group)
except exc.NodeDriverError as ex:
LOG.error(
"Node Update on policy target group modification "
"failed, %s", ex.message)
def notify_chain_parameters_updated(self, context,
servicechain_instance_id):
"""Hook for GBP drivers to inform about any updates that affect the SCI
Notify the correct set of node drivers that some parameter that affects
the service chain is updated. The updates could be something like
adding or removing an Allow Rule to the ruleset and may have to be
enforced in the Firewall Service VM, or it could simply be a
classifier update.
"""
sci = self.get_servicechain_instance(context, servicechain_instance_id)
updaters = self._get_scheduled_drivers(context, sci, 'update')
for update in list(updaters.values()):
try:
getattr(update['driver'],
'notify_chain_parameters_updated')(update['context'])
except exc.NodeDriverError as ex:
LOG.error("Node Update on GBP parameter update "
"failed, %s", ex.message)
def _get_instance_nodes(self, context, instance):
context = utils.admin_context(context)
if not instance['servicechain_specs']:
return []
specs = self.get_servicechain_spec(
context, instance['servicechain_specs'][0])
return self.get_servicechain_nodes(context, {'id': specs['nodes']})
def _get_node_instances(self, context, node):
context = utils.admin_context(context)
specs = self.get_servicechain_specs(
context, {'id': node['servicechain_specs']})
result = []
for spec in specs:
result.extend(self.get_servicechain_instances(
context, {'id': spec['instances']}))
return result
def _get_scheduled_drivers(self, context, instance, action, nodes=None):
if nodes is None:
nodes = self._get_instance_nodes(context, instance)
result = {}
func = getattr(self.driver_manager, 'schedule_' + action)
for node in nodes or []:
node_context = ctx.get_node_driver_context(
self, context, instance, node)
driver = func(node_context)
if not driver:
raise exc.NoDriverAvailableForAction(action=action,
node_id=node['id'])
result[node['id']] = {}
result[node['id']]['driver'] = driver
result[node['id']]['context'] = node_context
result[node['id']]['plumbing_info'] = driver.get_plumbing_info(
node_context)
return result
def _get_resource(self, context, resource_name, resource_id, fields=None):
deployers = {}
with db_api.CONTEXT_WRITER.using(context):
resource = getattr(super(NodeCompositionPlugin,
self), 'get_' + resource_name)(context, resource_id)
if resource_name == 'servicechain_instance':
if len(resource['servicechain_specs']) > 1:
raise exc.OneSpecPerInstanceAllowed()
try:
deployers = self._get_scheduled_drivers(context, resource,
'get')
except Exception:
LOG.warning("Failed to get node driver")
# Invoke drivers only if status attributes are requested
if not fields or STATUS_SET.intersection(set(fields)):
_resource = self._get_resource_status(context, resource_name,
deployers)
if _resource:
updated_status = _resource['status']
updated_status_details = _resource['status_details']
if resource['status'] != updated_status or (
resource['status_details'] != updated_status_details):
new_status = {resource_name:
{'status': updated_status,
'status_details': updated_status_details}}
session = context.session
with session.begin(subtransactions=True):
getattr(super(NodeCompositionPlugin, self),
'update_' + resource_name)(
context, resource['id'], new_status)
resource['status'] = updated_status
resource['status_details'] = updated_status_details
return db_api.resource_fields(resource, fields)
def _get_resource_status(self, context, resource_name, deployers=None):
"""
        Invoke node drivers only for servicechain_instance.
        Each node driver should implement a get_status API returning the
        status and status_details of the servicechain_instance.
"""
if resource_name == 'servicechain_instance':
nodes_status = []
result = {'status': 'BUILD',
'status_details': 'node deployment in progress'}
if deployers:
try:
for deploy in list(deployers.values()):
driver = deploy['driver']
nodes_status.append(driver.get_status(
deploy['context']))
node_status = [node['status'] for node in nodes_status]
if 'ERROR' in node_status:
result['status'] = 'ERROR'
result['status_details'] = 'node deployment failed'
elif node_status.count('ACTIVE') == len(
list(deployers.values())):
result['status'] = 'ACTIVE'
result['status_details'] = 'node deployment completed'
except Exception as exc:
LOG.error("Failed to get servicechain instance status "
"from node driver, Error: %(exc)s", {'exc': exc})
return
return result
result = {'status': 'ACTIVE', 'status_details': ''}
return result
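The aggregation above relies on each node driver exposing a get_status method; a hedged sketch of that contract, with an illustrative class name and body, is:

class ExampleNodeDriverStatus(object):
    """Illustrative driver fragment showing the get_status contract."""

    def get_status(self, node_context):
        # The plugin above marks the instance ERROR if any node reports
        # 'ERROR', ACTIVE if every node reports 'ACTIVE', BUILD otherwise.
        return {'status': 'ACTIVE',
                'status_details': 'node configuration applied'}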
def _deploy_servicechain_nodes(self, context, deployers):
self.plumber.plug_services(context, list(deployers.values()))
for deploy in list(deployers.values()):
driver = deploy['driver']
driver.create(deploy['context'])
def _update_servicechain_nodes(self, context, updaters):
for update in list(updaters.values()):
driver = update['driver']
driver.update(update['context'])
def _destroy_servicechain_nodes(self, context, destroyers):
# Actual node disruption
try:
for destroy in list(destroyers.values()):
driver = destroy['driver']
try:
driver.delete(destroy['context'])
except exc.NodeDriverError:
LOG.error("Node destroy failed, for node %s ",
driver['context'].current_node['id'])
except Exception as e:
if db_api.is_retriable(e):
with excutils.save_and_reraise_exception():
LOG.debug(
"Node driver '%(name)s' failed in"
" %(method)s, operation will be retried",
{'name': driver._name, 'method': 'delete'}
)
LOG.exception(e)
finally:
self.driver_manager.clear_node_owner(destroy['context'])
finally:
self.plumber.unplug_services(context, list(destroyers.values()))
def _validate_profile_update(self, context, original, updated):
# Raise if the profile is in use by any instance
# Ugly one shot query to verify whether the profile is in use
db = servicechain_db
query = context.session.query(db.ServiceChainInstance)
query = query.join(db.InstanceSpecAssociation)
query = query.join(db.ServiceChainSpec)
query = query.join(db.SpecNodeAssociation)
query = query.join(db.ServiceChainNode)
instance = query.filter(
db.ServiceChainNode.service_profile_id == original['id']).first()
if instance:
raise exc.ServiceProfileInUseByAnInstance(
profile_id=original['id'], instance_id=instance.id)
self._validate_shared_update(context, original, updated,
'service_profile')

View File

@@ -1,149 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
import six
from gbpservice.neutron.extensions import group_policy
from gbpservice.neutron.services.servicechain.plugins.ncp import exceptions
from gbpservice.neutron.services.servicechain.plugins.ncp import model
TARGET_DESCRIPTION = "%s facing Service Target for node %s in instance %s"
SERVICE_TARGET_NAME_PREFIX = 'service_target_'
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class NodePlumberBase(object):
"""Node Plumber base Class
    The node plumber is the entity that takes care of plumbing the nodes of
    a chain. Node plumbing means creating and removing the appropriate
    Neutron and GBP constructs (typically Ports and Policy Targets) based on
    each node's specific needs, taking the whole service chain into account
    in the process. Ideally, this module ensures that traffic flows as
    expected according to the user intent.
"""
@abc.abstractmethod
def initialize(self):
"""Perform plumber initialization.
No abstract methods defined below will be called prior to this method
being called.
"""
@abc.abstractmethod
def plug_services(self, context, deployment):
"""Plug services
        Given a deployment, this method is expected to create all the needed
        policy targets / neutron ports, placed appropriately based on the
        whole chain configuration.
        The expectation is that this method ultimately creates the
        ServiceTarget objects that will be retrieved by the node drivers at
        the right time.
A deployment is a list composed as follows:
[{'context': node_context,
'driver': deploying_driver,
'plumbing_info': node_plumbing_needs},
...]
        No assumptions should be made about the order of the nodes as received
        in the deployment; a node's position can be retrieved by calling
        node_context.current_position.
"""
@abc.abstractmethod
def unplug_services(self, context, deployment):
"""Plug services
Given a deployment, this method is expected to destroy all the
policy targets / neutron ports previously created for this chain
configuration.
The expectation is that this method ultimately removes all the
ServiceTarget related to this particular chain.
A deployment is a list composed as follows:
[{'context': node_context,
'driver': deploying_driver,
'plumbing_info': node_plumbing_needs},
...]
        No assumptions should be made about the order of the nodes as received
        in the deployment; a node's position can be retrieved by calling
        node_context.current_position.
"""
def _create_service_targets(self, context, part):
info = part['plumbing_info']
if not info:
return
part_context = part['context']
provider = part_context.provider
consumer = part_context.consumer
management = part_context.management
self._create_service_target(context, part_context,
info.get('provider', []),
provider, 'provider')
self._create_service_target(context, part_context,
info.get('consumer', []),
consumer, 'consumer')
self._create_service_target(context, part_context,
info.get('management', []),
management, 'management')
def _delete_service_targets(self, context, part):
part_context = part['context']
node = part_context.current_node
instance = part_context.instance
gbp_plugin = part_context.gbp_plugin
pts = model.get_service_targets(
context.session, servicechain_instance_id=instance['id'],
servicechain_node_id=node['id'])
for pt in pts:
try:
gbp_plugin.delete_policy_target(
context.elevated(), pt.policy_target_id)
except group_policy.PolicyTargetNotFound as ex:
LOG.debug(ex.message)
def _create_service_target(self, context, part_context, targets, group,
relationship, extra_data=None):
extra_data = extra_data or {}
instance = part_context.instance
node = part_context.current_node
gbp_plugin = part_context.gbp_plugin
for target in targets:
if not group:
raise exceptions.NotAvailablePTGForTargetRequest(
ptg_type=relationship, instance=instance['id'],
node=node['id'])
data = {'policy_target_group_id': group['id'],
'description': TARGET_DESCRIPTION % (relationship,
node['id'],
instance['id']),
'name': SERVICE_TARGET_NAME_PREFIX + '%s_%s_%s' % (
relationship, node['id'][:5], instance['id'][:5]),
'port_id': None,
'cluster_id': ''}
data.update(extra_data)
data.update(target)
pt = gbp_plugin.create_policy_target(context.elevated(),
{'policy_target': data})
model.set_service_target(part_context, pt['id'], relationship)
def _sort_deployment(self, deployment):
deployment.sort(key=lambda x: x['context'].current_position,
reverse=True)
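A hedged sketch of a minimal concrete plumber built on the base class above: it simply materializes whatever each node requests through plumbing_info, with no extra wiring. The class name is illustrative; the helpers it calls are the ones defined in the base class.

class ExamplePassThroughPlumber(NodePlumberBase):
    """Illustrative plumber: create/delete exactly the requested targets."""

    def initialize(self):
        pass

    def plug_services(self, context, deployment):
        # Order the parts by their position in the chain, then create the
        # service targets each node asked for via plumbing_info.
        self._sort_deployment(deployment)
        for part in deployment:
            self._create_service_targets(context, part)

    def unplug_services(self, context, deployment):
        self._sort_deployment(deployment)
        for part in deployment:
            self._delete_service_targets(context, part)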

View File

@@ -1,72 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins import constants as pconst
from neutron_lib.plugins import directory
from oslo_log import log as logging
from gbpservice.neutron.services.grouppolicy.common import exceptions as gp_exc
from gbpservice.neutron.services.grouppolicy import plugin as gbp_plugin
LOG = logging.getLogger(__name__)
class SharingMixin(object):
"""Implementation of the Service Chain Plugin sharing rules.
"""
usage_graph = {'servicechain_spec': {'nodes':
'servicechain_node'},
'servicechain_node': {'service_profile_id':
'service_profile'},
'servicechain_instance': {},
'service_profile': {},
}
@property
def gbp_plugin(self):
# REVISIT(rkukura): Need initialization method after all
# plugins are loaded to grab and store plugin.
gbp_plugin = directory.get_plugin(pconst.GROUP_POLICY)
if not gbp_plugin:
LOG.error("No group policy service plugin found.")
raise gp_exc.GroupPolicyDeploymentError()
return gbp_plugin
def _validate_shared_create(self, context, obj, identity):
return gbp_plugin.GroupPolicyPlugin._validate_shared_create(
self, context, obj, identity)
def _validate_shared_update(self, context, original, updated, identity):
self._validate_shared_create(context, updated, identity)
if updated.get('shared') != original.get('shared'):
context = context.elevated()
getattr(self, '_validate_%s_unshare' % identity)(context, updated)
def _validate_servicechain_node_unshare(self, context, obj):
# Verify not pointed by shared SCS
gbp_plugin.GroupPolicyPlugin._check_shared_or_different_tenant(
context, obj, self.get_servicechain_specs, 'id',
obj['servicechain_specs'])
def _validate_servicechain_spec_unshare(self, context, obj):
# Verify not pointed by shared policy actions
gbp_plugin.GroupPolicyPlugin._check_shared_or_different_tenant(
context, obj, self.gbp_plugin.get_policy_actions, 'action_value',
[obj['id']])
def _validate_service_profile_unshare(self, context, obj):
gbp_plugin.GroupPolicyPlugin._check_shared_or_different_tenant(
context, obj, self.get_servicechain_nodes, 'service_profile_id')

View File

@@ -316,114 +316,6 @@ def get_update_nat_pool_attrs():
return {'name': 'new_name'}
# Service Chain
@gbp_attributes
def get_create_service_profile_default_attrs():
return {'name': '', 'description': ''}
@gbp_attributes
def get_create_service_profile_attrs():
return {
'name': 'serviceprofile1',
'service_type': 'FIREWALL',
'description': 'test service profile',
}
@gbp_attributes
def get_update_service_profile_attrs():
return {
'name': 'new_name',
}
@gbp_attributes
def get_create_servicechain_node_default_attrs():
return {
'name': '',
'description': '',
'config': '{}',
'service_type': None,
'shared': False,
}
@gbp_attributes
def get_create_servicechain_node_attrs():
return {
'name': 'servicechain1',
'service_profile_id': _uuid(),
'description': 'test servicechain node',
'config': '{}',
'service_type': None,
'shared': True,
}
@gbp_attributes
def get_update_servicechain_node_attrs():
return {
'name': 'new_name',
'config': 'new_config',
}
@gbp_attributes
def get_create_servicechain_spec_default_attrs():
return {
'name': '',
'description': '',
'nodes': [],
'shared': False,
}
@gbp_attributes
def get_create_servicechain_spec_attrs():
return {
'name': 'servicechainspec1',
'nodes': [_uuid(), _uuid()],
'description': 'test servicechain spec',
'shared': True,
}
@gbp_attributes
def get_update_servicechain_spec_attrs():
return {
'name': 'new_name',
'nodes': [_uuid()]
}
@gbp_attributes
def get_create_servicechain_instance_default_attrs():
return {'name': '', 'description': '', 'config_param_values': "{}"}
@gbp_attributes
def get_create_servicechain_instance_attrs():
return {
'name': 'servicechaininstance1',
'servicechain_specs': [_uuid()],
'provider_ptg_id': _uuid(),
'consumer_ptg_id': _uuid(),
'management_ptg_id': _uuid(),
'classifier_id': _uuid(),
'config_param_values': "{}",
'description': 'test servicechain instance'
}
def get_update_servicechain_instance_attrs():
return {
'name': 'new_name',
'servicechain_specs': [_uuid()],
'classifier_id': _uuid()
}
@gbp_attributes
def get_create_application_policy_group_default_attrs_and_prj_id():
return {'name': '', 'description': '', 'shared': False}
@@ -528,38 +420,6 @@ def get_create_nat_pool_default_attrs_and_prj_id():
'shared': False}
# Service Chain
@gbp_attributes
def get_create_service_profile_default_attrs_and_prj_id():
return {'name': '', 'description': ''}
@gbp_attributes
def get_create_servicechain_node_default_attrs_and_prj_id():
return {
'name': '',
'description': '',
'config': '{}',
'service_type': None,
'shared': False,
}
@gbp_attributes
def get_create_servicechain_spec_default_attrs_and_prj_id():
return {
'name': '',
'description': '',
'nodes': [],
'shared': False,
}
@gbp_attributes
def get_create_servicechain_instance_default_attrs_and_prj_id():
return {'name': '', 'description': '', 'config_param_values': "{}"}
def get_resource_plural(resource):
if resource.endswith('y'):
resource_plural = resource.replace('y', 'ies')

View File

@@ -30,11 +30,8 @@ from oslo_utils import uuidutils
import six
import webob.exc
from gbpservice.neutron.db import all_models # noqa
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
from gbpservice.neutron.db import servicechain_db as svcchain_db
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.extensions import servicechain as service_chain
from gbpservice.neutron.services.grouppolicy.common import (
constants as gp_constants)
import gbpservice.neutron.tests
@@ -210,21 +207,14 @@ class ApiManagerMixin(object):
class GroupPolicyDBTestBase(ApiManagerMixin):
resource_prefix_map = dict(
(k, gp_constants.GBP_PREFIXES[constants.SERVICECHAIN])
for k in list(service_chain.RESOURCE_ATTRIBUTE_MAP.keys()))
resource_prefix_map.update(dict(
(k, gp_constants.GBP_PREFIXES[constants.GROUP_POLICY])
for k in list(gpolicy.RESOURCE_ATTRIBUTE_MAP.keys())
))
for k in list(gpolicy.RESOURCE_ATTRIBUTE_MAP.keys()))
fmt = JSON_FORMAT
def __getattr__(self, item):
# Verify is an update of a proper GBP object
def _is_sc_resource(plural):
return plural in service_chain.RESOURCE_ATTRIBUTE_MAP
def _is_gbp_resource(plural):
return plural in gpolicy.RESOURCE_ATTRIBUTE_MAP
@@ -235,7 +225,7 @@ class GroupPolicyDBTestBase(ApiManagerMixin):
return plural in flowclassifier.RESOURCE_ATTRIBUTE_MAP
def _is_valid_resource(plural):
return (_is_gbp_resource(plural) or _is_sc_resource(plural) or
return (_is_gbp_resource(plural) or
_is_flowc_resource(plural) or _is_sfc_resource(plural))
def _get_prefix(plural):
@@ -317,16 +307,6 @@ class GroupPolicyDBTestBase(ApiManagerMixin):
self.assertEqual(sorted([i['id'] for i in res[resource_plural]]),
sorted([i[resource]['id'] for i in items]))
def _create_profiled_servicechain_node(
self, service_type=constants.LOADBALANCERV2, shared_profile=False,
profile_tenant_id=None, **kwargs):
prof = self.create_service_profile(
service_type=service_type,
shared=shared_profile,
tenant_id=profile_tenant_id or self._tenant_id)['service_profile']
return self.create_servicechain_node(
service_profile_id=prof['id'], **kwargs)
def _set_notification_mocks(self):
self.l3_notify_p = mock.patch(
'neutron.extensions.l3agentscheduler.notify').start()
@@ -356,22 +336,11 @@ DB_GP_PLUGIN_KLASS = (GroupPolicyDBTestPlugin.__module__ + '.' +
GroupPolicyDBTestPlugin.__name__)
class ServiceChainDBTestPlugin(svcchain_db.ServiceChainDbPlugin):
supported_extension_aliases = ['servicechain'] + UNSUPPORTED_REQUIRED_EXTS
path_prefix = "/servicechain"
DB_SC_PLUGIN_KLASS = (ServiceChainDBTestPlugin.__module__ + '.' +
ServiceChainDBTestPlugin.__name__)
class GroupPolicyDbTestCase(GroupPolicyDBTestBase,
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self, core_plugin=None, sc_plugin=None, service_plugins=None,
ext_mgr=None, gp_plugin=None):
sc_plugin = sc_plugin or DB_SC_PLUGIN_KLASS
gp_plugin = gp_plugin or DB_GP_PLUGIN_KLASS
if not service_plugins:
@@ -379,8 +348,7 @@ class GroupPolicyDbTestCase(GroupPolicyDBTestBase,
'l3_plugin_name': 'router',
'flavors_plugin_name': 'neutron.services.flavors.'
'flavors_plugin.FlavorsPlugin',
'gp_plugin_name': gp_plugin,
'sc_plugin_name': sc_plugin}
'gp_plugin_name': gp_plugin}
# Always install SFC plugin for convenience
service_plugins['sfc_plugin_name'] = 'sfc'
@@ -396,14 +364,12 @@ class GroupPolicyDbTestCase(GroupPolicyDBTestBase,
test_policy_file = ETCDIR + "/test-policy.json"
policy.refresh(policy_file=test_policy_file)
self.plugin = importutils.import_object(gp_plugin)
self._sc_plugin = importutils.import_object(sc_plugin)
if not ext_mgr:
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
plugins = directory.get_plugins()
self._gbp_plugin = plugins.get(constants.GROUP_POLICY)
self._sc_plugin = plugins.get(constants.SERVICECHAIN)
self._l3_plugin = plugins.get(constants.L3)
self._set_notification_mocks()
# The following is done to stop the neutron code from checking

View File

@@ -32,10 +32,6 @@ class GroupPolicyMappingDBTestPlugin(gpmdb.GroupPolicyMappingDbPlugin):
DB_GP_PLUGIN_KLASS = (GroupPolicyMappingDBTestPlugin.__module__ + '.' +
GroupPolicyMappingDBTestPlugin.__name__)
SC_PLUGIN_KLASS = (
"gbpservice.neutron.services.servicechain.plugins.ncp.plugin."
"NodeCompositionPlugin")
class GroupPolicyMappingDbTestCase(tgpdb.GroupPolicyDbTestCase,
test_l3.L3NatTestCaseMixin):
@@ -49,8 +45,7 @@ class GroupPolicyMappingDbTestCase(tgpdb.GroupPolicyDbTestCase,
service_plugins = {
'gp_plugin_name': gp_plugin,
'flavors_plugin_name': 'neutron.services.flavors.'
'flavors_plugin.FlavorsPlugin',
'servicechain_plugin': sc_plugin or SC_PLUGIN_KLASS}
'flavors_plugin.FlavorsPlugin'}
service_plugins['l3_plugin_name'] = l3_plugin or "router"
if qos_plugin:
service_plugins['qos_plugin_name'] = qos_plugin

View File

@@ -1,603 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import webob.exc
from neutron_lib import context
from neutron_lib.plugins import constants
from oslo_config import cfg
from oslo_utils import uuidutils
from gbpservice.neutron.db import servicechain_db as svcchain_db
from gbpservice.neutron.extensions import servicechain as service_chain
from gbpservice.neutron.tests.unit import common as cm
from gbpservice.neutron.tests.unit.db.grouppolicy import test_group_policy_db
JSON_FORMAT = 'json'
GP_PLUGIN_KLASS = (
"gbpservice.neutron.services.grouppolicy.plugin.GroupPolicyPlugin")
class ServiceChainDBTestBase(test_group_policy_db.GroupPolicyDBTestBase):
def _get_resource_plural(self, resource):
if resource.endswith('y'):
resource_plural = resource.replace('y', 'ies')
else:
resource_plural = resource + 's'
return resource_plural
def _test_list_resources(self, resource, items,
neutron_context=None,
query_params=None):
resource_plural = self._get_resource_plural(resource)
res = self._list(resource_plural,
neutron_context=neutron_context,
query_params=query_params)
params = query_params.split('&')
params = dict((x.split('=')[0], x.split('=')[1].split(','))
for x in params)
count = getattr(self.plugin, 'get_%s_count' % resource_plural)(
neutron_context or context.get_admin_context(), params)
self.assertEqual(len(res[resource_plural]), count)
resource = resource.replace('-', '_')
self.assertEqual(sorted([i['id'] for i in res[resource_plural]]),
sorted([i[resource]['id'] for i in items]))
def _create_profiled_servicechain_node(
self, service_type=constants.LOADBALANCERV2, shared_profile=False,
profile_tenant_id=None, **kwargs):
prof = self.create_service_profile(
service_type=service_type,
shared=shared_profile,
tenant_id=profile_tenant_id or self._tenant_id)['service_profile']
return self.create_servicechain_node(
service_profile_id=prof['id'], **kwargs)
class ServiceChainDBTestPlugin(svcchain_db.ServiceChainDbPlugin):
supported_extension_aliases = ['servicechain'] + (
test_group_policy_db.UNSUPPORTED_REQUIRED_EXTS)
path_prefix = "/servicechain"
DB_GP_PLUGIN_KLASS = (ServiceChainDBTestPlugin.__module__ + '.' +
ServiceChainDBTestPlugin.__name__)
class ServiceChainDbTestCase(test_group_policy_db.GroupPolicyDbTestCase):
def setUp(self, core_plugin=None, sc_plugin=None, service_plugins=None,
ext_mgr=None, gp_plugin=None):
super(ServiceChainDbTestCase, self).setUp(
gp_plugin=gp_plugin or GP_PLUGIN_KLASS, core_plugin=core_plugin,
sc_plugin=sc_plugin, service_plugins=service_plugins,
ext_mgr=ext_mgr)
self.plugin = self._sc_plugin
class TestServiceChainResources(ServiceChainDbTestCase):
def _test_show_resource(self, resource, resource_id, attrs):
resource_plural = self._get_resource_plural(resource)
req = self.new_show_request(resource_plural, resource_id,
fmt=self.fmt)
res = self.deserialize(self.fmt,
req.get_response(self.ext_api))
for k, v in six.iteritems(attrs):
self.assertEqual(v, res[resource][k])
def test_create_servicechain_specs_same_node(self):
template1 = '{"key1":"value1"}'
sp = self.create_service_profile(
service_type=constants.FIREWALL)['service_profile']
scn = self.create_servicechain_node(
config=template1, service_profile_id=sp['id'])
scn_id = scn['servicechain_node']['id']
spec1 = {"servicechain_spec": {'name': 'scs1',
'tenant_id': self._tenant_id,
'nodes': [scn_id]}}
spec_req = self.new_create_request('servicechain_specs',
spec1,
self.fmt)
spec_res = spec_req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPCreated.code, spec_res.status_int)
res = self.deserialize(self.fmt, spec_res)
self.assertIn('servicechain_spec', res)
self.assertEqual([scn_id], res['servicechain_spec']['nodes'])
spec2 = {"servicechain_spec": {'name': 'scs2',
'tenant_id': self._tenant_id,
'nodes': [scn_id]}}
spec_req = self.new_create_request('servicechain_specs',
spec2,
self.fmt)
spec_res = spec_req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPCreated.code, spec_res.status_int)
res = self.deserialize(self.fmt, spec_res)
self.assertIn('servicechain_spec', res)
self.assertEqual([scn_id], res['servicechain_spec']['nodes'])
def test_create_and_show_servicechain_node(self):
profile = self.create_service_profile(service_type=constants.FIREWALL)
attrs = cm.get_create_servicechain_node_default_attrs(
service_profile_id=profile['service_profile']['id'],
config="config1")
scn = self.create_servicechain_node(
service_profile_id=profile['service_profile']['id'],
config="config1")
for k, v in six.iteritems(attrs):
self.assertEqual(v, scn['servicechain_node'][k])
self._test_show_resource('servicechain_node',
scn['servicechain_node']['id'],
attrs)
def test_list_servicechain_nodes(self):
scns = [
self._create_profiled_servicechain_node(name='scn1',
description='scn'),
self._create_profiled_servicechain_node(name='scn2',
description='scn'),
self._create_profiled_servicechain_node(name='scn3',
description='scn')]
self._test_list_resources('servicechain_node', scns,
query_params='description=scn')
def test_update_servicechain_node(self):
name = 'new_servicechain_node'
description = 'new desc'
config = 'new_config'
profile = self.create_service_profile(service_type=constants.FIREWALL)
attrs = cm.get_create_servicechain_node_default_attrs(
name=name, description=description,
config=config,
service_profile_id=profile['service_profile']['id'])
scn = self.create_servicechain_node(
service_profile_id=profile['service_profile']['id'])
data = {'servicechain_node': {'name': name,
'description': description,
'config': config}}
req = self.new_update_request('servicechain_nodes', data,
scn['servicechain_node']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in six.iteritems(attrs):
self.assertEqual(v, res['servicechain_node'][k])
self._test_show_resource('servicechain_node',
scn['servicechain_node']['id'],
attrs)
def test_delete_servicechain_node(self):
ctx = context.get_admin_context()
scn = self._create_profiled_servicechain_node()
scn_id = scn['servicechain_node']['id']
scs = self.create_servicechain_spec(nodes=[scn_id])
scs_id = scs['servicechain_spec']['id']
# Deleting Service Chain Node in use by a Spec should fail
self.assertRaises(service_chain.ServiceChainNodeInUse,
self.plugin.delete_servicechain_node, ctx, scn_id)
req = self.new_delete_request('servicechain_specs', scs_id)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
# After deleting the Service Chain Spec, node delete should succeed
req = self.new_delete_request('servicechain_nodes', scn_id)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
self.assertRaises(service_chain.ServiceChainNodeNotFound,
self.plugin.get_servicechain_node,
ctx, scn_id)
def test_create_and_show_servicechain_spec(self):
name = "scs1"
scn = self._create_profiled_servicechain_node()
scn_id = scn['servicechain_node']['id']
attrs = cm.get_create_servicechain_spec_default_attrs(
name=name, nodes=[scn_id])
scs = self.create_servicechain_spec(name=name, nodes=[scn_id])
for k, v in six.iteritems(attrs):
self.assertEqual(v, scs['servicechain_spec'][k])
self._test_show_resource('servicechain_spec',
scs['servicechain_spec']['id'],
attrs)
def test_create_spec_multiple_nodes(self):
name = "scs1"
scn1 = self._create_profiled_servicechain_node()
scn1_id = scn1['servicechain_node']['id']
scn2 = self._create_profiled_servicechain_node()
scn2_id = scn2['servicechain_node']['id']
attrs = cm.get_create_servicechain_spec_default_attrs(
name=name, nodes=[scn1_id, scn2_id])
scs = self.create_servicechain_spec(
name=name, nodes=[scn1_id, scn2_id])
for k, v in six.iteritems(attrs):
self.assertEqual(v, scs['servicechain_spec'][k])
def test_list_servicechain_specs(self):
scs = [self.create_servicechain_spec(name='scs1', description='scs'),
self.create_servicechain_spec(name='scs2', description='scs'),
self.create_servicechain_spec(name='scs3', description='scs')]
self._test_list_resources('servicechain_spec', scs,
query_params='description=scs')
def test_node_ordering_list_servicechain_specs(self):
scn1_id = self._create_profiled_servicechain_node()[
'servicechain_node']['id']
scn2_id = self._create_profiled_servicechain_node()[
'servicechain_node']['id']
nodes_list = [scn1_id, scn2_id]
scs = self.create_servicechain_spec(name='scs1',
nodes=nodes_list)
self.assertEqual(nodes_list, scs['servicechain_spec']['nodes'])
res = self._list('servicechain_specs')
self.assertEqual(1, len(res['servicechain_specs']))
self.assertEqual(nodes_list, res['servicechain_specs'][0]['nodes'])
# Delete the service chain spec and create another with nodes in
        # reverse order and verify that the proper ordering is maintained
req = self.new_delete_request('servicechain_specs',
scs['servicechain_spec']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
nodes_list.reverse()
scs = self.create_servicechain_spec(name='scs1',
nodes=nodes_list)
self.assertEqual(scs['servicechain_spec']['nodes'], nodes_list)
res = self._list('servicechain_specs')
self.assertEqual(1, len(res['servicechain_specs']))
self.assertEqual(nodes_list, res['servicechain_specs'][0]['nodes'])
def test_update_servicechain_spec(self):
name = "new_servicechain_spec1"
description = 'new desc'
scn_id = self._create_profiled_servicechain_node()[
'servicechain_node']['id']
attrs = cm.get_create_servicechain_spec_default_attrs(
name=name, description=description, nodes=[scn_id])
scs = self.create_servicechain_spec()
data = {'servicechain_spec': {'name': name, 'description': description,
'nodes': [scn_id]}}
req = self.new_update_request('servicechain_specs', data,
scs['servicechain_spec']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in six.iteritems(attrs):
self.assertEqual(v, res['servicechain_spec'][k])
self._test_show_resource('servicechain_spec',
scs['servicechain_spec']['id'], attrs)
def test_delete_servicechain_spec(self):
ctx = context.get_admin_context()
scs = self.create_servicechain_spec()
scs_id = scs['servicechain_spec']['id']
req = self.new_delete_request('servicechain_specs', scs_id)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
self.assertRaises(service_chain.ServiceChainSpecNotFound,
self.plugin.get_servicechain_spec, ctx, scs_id)
def test_delete_spec_in_use_by_policy_action_rejected(self):
ctx = context.get_admin_context()
scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
data = {'policy_action': {'action_type': 'redirect',
'tenant_id': self._tenant_id,
'action_value': scs_id}}
pa_req = self.new_create_request('grouppolicy/policy_actions',
data, self.fmt)
res = pa_req.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
self.assertRaises(service_chain.ServiceChainSpecInUse,
self.plugin.delete_servicechain_spec, ctx, scs_id)
def test_delete_spec_in_use_by_instance_rejected(self):
ctx = context.get_admin_context()
scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
sci = self.create_servicechain_instance(servicechain_specs=[scs_id])
sci_id = sci['servicechain_instance']['id']
# Deleting the Spec used by Instance should not be allowed
self.assertRaises(service_chain.ServiceChainSpecInUse,
self.plugin.delete_servicechain_spec, ctx, scs_id)
req = self.new_delete_request('servicechain_instances', sci_id)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
self.assertRaises(service_chain.ServiceChainInstanceNotFound,
self.plugin.get_servicechain_instance,
ctx, sci_id)
# Deleting the spec should succeed after the instance is deleted
req = self.new_delete_request('servicechain_specs', scs_id)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
self.assertRaises(service_chain.ServiceChainSpecNotFound,
self.plugin.get_servicechain_spec, ctx, scs_id)
def test_create_and_show_servicechain_instance(self):
scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
policy_target_group_id = uuidutils.generate_uuid()
classifier_id = uuidutils.generate_uuid()
config_param_values = "{}"
attrs = cm.get_create_servicechain_instance_default_attrs(
servicechain_specs=[scs_id],
provider_ptg_id=policy_target_group_id,
consumer_ptg_id=policy_target_group_id,
management_ptg_id=policy_target_group_id,
classifier_id=classifier_id,
config_param_values=config_param_values)
sci = self.create_servicechain_instance(
servicechain_specs=[scs_id],
provider_ptg_id=policy_target_group_id,
consumer_ptg_id=policy_target_group_id,
management_ptg_id=policy_target_group_id,
classifier_id=classifier_id,
config_param_values=config_param_values)
for k, v in six.iteritems(attrs):
self.assertEqual(v, sci['servicechain_instance'][k])
self._test_show_resource('servicechain_instance',
sci['servicechain_instance']['id'],
attrs)
req = self.new_delete_request('servicechain_instances',
sci['servicechain_instance']['id'])
req.get_response(self.ext_api)
def test_list_servicechain_instances(self):
servicechain_instances = [self.create_servicechain_instance(
name='sci1', description='sci'),
self.create_servicechain_instance(name='sci2', description='sci'),
self.create_servicechain_instance(name='sci3', description='sci')]
self._test_list_resources('servicechain_instance',
servicechain_instances,
query_params='description=sci')
def test_spec_ordering_list_servicechain_instances(self):
scs1_id = self.create_servicechain_spec()['servicechain_spec']['id']
scs2_id = self.create_servicechain_spec()['servicechain_spec']['id']
specs_list = [scs1_id, scs2_id]
sci = self.create_servicechain_instance(name='sci1',
servicechain_specs=specs_list)
self.assertEqual(specs_list,
sci['servicechain_instance']['servicechain_specs'])
res = self._list('servicechain_instances')
self.assertEqual(1, len(res['servicechain_instances']))
result_instance = res['servicechain_instances'][0]
self.assertEqual(specs_list, result_instance['servicechain_specs'])
# Delete the service chain instance and create another with specs in
        # reverse order and verify that the proper ordering is maintained
req = self.new_delete_request('servicechain_instances',
sci['servicechain_instance']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
specs_list.reverse()
sci = self.create_servicechain_instance(name='sci1',
servicechain_specs=specs_list)
self.assertEqual(specs_list,
sci['servicechain_instance']['servicechain_specs'])
res = self._list('servicechain_instances')
self.assertEqual(1, len(res['servicechain_instances']))
result_instance = res['servicechain_instances'][0]
self.assertEqual(specs_list,
result_instance['servicechain_specs'])
def test_update_servicechain_instance(self):
name = "new_servicechain_instance"
description = 'new desc'
config_param_values = "{}"
scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
provider_ptg_id = uuidutils.generate_uuid()
consumer_ptg_id = uuidutils.generate_uuid()
management_ptg_id = uuidutils.generate_uuid()
classifier_id = uuidutils.generate_uuid()
attrs = cm.get_create_servicechain_instance_default_attrs(
name=name, description=description, servicechain_specs=[scs_id],
provider_ptg_id=provider_ptg_id, consumer_ptg_id=consumer_ptg_id,
management_ptg_id=management_ptg_id,
classifier_id=classifier_id,
config_param_values=config_param_values)
sci = self.create_servicechain_instance(
servicechain_specs=[scs_id], provider_ptg_id=provider_ptg_id,
consumer_ptg_id=consumer_ptg_id,
management_ptg_id=management_ptg_id, classifier_id=classifier_id,
config_param_values=config_param_values)
new_classifier_id = uuidutils.generate_uuid()
new_scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
attrs.update({'servicechain_specs': [new_scs_id],
'classifier_id': new_classifier_id})
data = {'servicechain_instance': {'name': name,
'description': description,
'servicechain_specs': [new_scs_id],
'classifier_id': new_classifier_id}}
req = self.new_update_request('servicechain_instances', data,
sci['servicechain_instance']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in six.iteritems(attrs):
self.assertEqual(v, res['servicechain_instance'][k])
self._test_show_resource('servicechain_instance',
sci['servicechain_instance']['id'], attrs)
req = self.new_delete_request('servicechain_instances',
sci['servicechain_instance']['id'])
req.get_response(self.ext_api)
def test_delete_servicechain_instance(self):
ctx = context.get_admin_context()
sci = self.create_servicechain_instance()
sci_id = sci['servicechain_instance']['id']
req = self.new_delete_request('servicechain_instances', sci_id)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
self.assertRaises(service_chain.ServiceChainInstanceNotFound,
self.plugin.get_servicechain_instance,
ctx, sci_id)
def test_create_and_show_service_profile(self):
attrs = cm.get_create_service_profile_default_attrs(
service_type=constants.FIREWALL, vendor="vendor1")
scn = self.create_service_profile(
service_type=constants.FIREWALL, vendor="vendor1")
for k, v in six.iteritems(attrs):
self.assertEqual(scn['service_profile'][k], v)
self._test_show_resource('service_profile',
scn['service_profile']['id'], attrs)
def test_list_service_profile(self):
scns = [self.create_service_profile(name='sp1', description='sp',
service_type='LOADBALANCERV2'),
self.create_service_profile(name='sp2', description='sp',
service_type='LOADBALANCERV2'),
self.create_service_profile(name='sp3', description='sp',
service_type='LOADBALANCERV2')]
self._test_list_resources('service_profile', scns,
query_params='description=sp')
def test_update_service_profile(self):
name = 'new_service_profile'
description = 'new desc'
attrs = cm.get_create_service_profile_default_attrs(
name=name, description=description,
service_type=constants.FIREWALL)
scn = self.create_service_profile(service_type=constants.FIREWALL)
data = {'service_profile': {'name': name,
'description': description}}
req = self.new_update_request('service_profiles', data,
scn['service_profile']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in six.iteritems(attrs):
self.assertEqual(res['service_profile'][k], v)
self._test_show_resource('service_profile',
scn['service_profile']['id'], attrs)
def test_delete_service_profile(self):
ctx = context.get_admin_context()
sp = self.create_service_profile(service_type='LOADBALANCERV2')
sp_id = sp['service_profile']['id']
scn = self.create_servicechain_node(service_profile_id=sp_id)
scn_id = scn['servicechain_node']['id']
        # Deleting a Service Profile in use by a Node should fail
self.assertRaises(service_chain.ServiceProfileInUse,
self.plugin.delete_service_profile, ctx, sp_id)
req = self.new_delete_request('servicechain_nodes', scn_id)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        # After deleting the Service Chain Node, profile delete should succeed
req = self.new_delete_request('service_profiles', sp_id)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
self.assertRaises(service_chain.ServiceProfileNotFound,
self.plugin.get_service_profile,
ctx, sp_id)
class TestServiceChainStatusAttributesForResources(
test_group_policy_db.TestStatusAttributesForResources):
def test_set_status_attrs(self):
for resource_name in service_chain.RESOURCE_ATTRIBUTE_MAP:
self._test_set_status_attrs(self._get_resource_singular(
resource_name), self._sc_plugin)
class TestQuotasForServiceChain(ServiceChainDbTestCase):
def setUp(self, core_plugin=None, sc_plugin=None,
gp_plugin=None, service_plugins=None, ext_mgr=None):
cfg.CONF.set_override('quota_servicechain_node', 1,
group='QUOTAS')
cfg.CONF.set_override('quota_servicechain_spec', 1,
group='QUOTAS')
cfg.CONF.set_override('quota_servicechain_instance', 1,
group='QUOTAS')
cfg.CONF.set_override('quota_service_profile', 1,
group='QUOTAS')
super(TestQuotasForServiceChain, self).setUp(
core_plugin=core_plugin, sc_plugin=sc_plugin,
gp_plugin=gp_plugin, service_plugins=service_plugins,
ext_mgr=ext_mgr)
def tearDown(self):
cfg.CONF.set_override('quota_servicechain_node', -1,
group='QUOTAS')
cfg.CONF.set_override('quota_servicechain_spec', -1,
group='QUOTAS')
cfg.CONF.set_override('quota_servicechain_instance', -1,
group='QUOTAS')
cfg.CONF.set_override('quota_service_profile', -1,
group='QUOTAS')
super(TestQuotasForServiceChain, self).tearDown()
def test_servicechain_node_quota(self):
self.create_servicechain_node()
self.assertRaises(webob.exc.HTTPClientError,
self.create_servicechain_node)
def test_servicechain_spec_quota(self):
self.create_servicechain_spec()
self.assertRaises(webob.exc.HTTPClientError,
self.create_servicechain_spec)
def test_servicechain_instance_quota(self):
self.create_servicechain_instance()
self.assertRaises(webob.exc.HTTPClientError,
self.create_servicechain_instance)
def test_service_profile(self):
self.create_service_profile(service_type=constants.FIREWALL)
self.assertRaises(webob.exc.HTTPClientError,
self.create_service_profile,
service_type=constants.FIREWALL)

View File

@@ -217,15 +217,6 @@ class TestHeatDriver(unittest2.TestCase):
tenant_context = self.heat_driver_obj._get_tenant_context(tenant_id)
self.assertEqual(tenant_context, expected_tenant_context)
def test_is_service_target(self):
policy_target = {'name': 'service_target_provider_0132c_00b93'}
retval = self.heat_driver_obj._is_service_target(policy_target)
self.assertTrue(retval)
policy_target = {'name': 'mem1_gbpui'}
expected_result = False
result = self.heat_driver_obj._is_service_target(policy_target)
self.assertEqual(result, expected_result)
@mock.patch.object(neutron_client.Client, "show_port")
@mock.patch.object(gbp_client.Client, "list_policy_targets")
def test_get_member_ips(self, list_pt_mock_obj, show_port_mock_obj):

View File

@@ -647,33 +647,3 @@ class TestGBPClient(SampleData):
self.assertEqual(retval, obj)
mock_obj.assert_called_once_with(token=self.AUTH_TOKEN,
endpoint_url=self.ENDPOINT_URL)
def test_get_service_profile(self, mock_obj):
instance = mock_obj.return_value
obj = instance.show_service_profile(
'service_profile_id')['service_profile']
retval = self.gbp_obj.get_service_profile(self.AUTH_TOKEN,
'service_profile_id')
self.assertEqual(retval, obj)
mock_obj.assert_called_once_with(token=self.AUTH_TOKEN,
endpoint_url=self.ENDPOINT_URL)
def test_get_servicechain_node(self, mock_obj):
instance = mock_obj.return_value
obj = instance.show_servicechain_node(
'node_id')['servicechain_node']
retval = self.gbp_obj.get_servicechain_node(self.AUTH_TOKEN,
'node_id')
self.assertEqual(retval, obj)
mock_obj.assert_called_once_with(token=self.AUTH_TOKEN,
endpoint_url=self.ENDPOINT_URL)
def test_get_servicechain_instance(self, mock_obj):
instance = mock_obj.return_value
obj = instance.show_servicechain_instance(
'instance_id')['servicechain_instance']
retval = self.gbp_obj.get_servicechain_instance(self.AUTH_TOKEN,
'instance_id')
self.assertEqual(retval, obj)
mock_obj.assert_called_once_with(token=self.AUTH_TOKEN,
endpoint_url=self.ENDPOINT_URL)

View File

@@ -37,8 +37,6 @@ cfg.CONF.import_opt('policy_drivers',
GP_PLUGIN_KLASS = (
"gbpservice.neutron.services.grouppolicy.plugin.GroupPolicyPlugin"
)
SERVICECHAIN_SPECS = 'servicechain/servicechain_specs'
SERVICECHAIN_NODES = 'servicechain/servicechain_nodes'
class FakeDriver(object):
@@ -118,38 +116,6 @@ class GroupPolicyPluginTestBase(tgpmdb.GroupPolicyMappingDbTestCase):
external_segment_id=es['external_segment']['id'],
**kwargs)['nat_pool']
def _create_servicechain_spec(self, node_types=None, shared=False):
node_types = node_types or []
if not node_types:
node_types = ['LOADBALANCERV2']
node_ids = []
for node_type in node_types:
node_ids.append(self._create_servicechain_node(node_type,
shared=shared))
data = {'servicechain_spec': {'tenant_id': self._tenant_id if not
shared else 'another-tenant',
'nodes': node_ids,
'shared': shared}}
scs_req = self.new_create_request(
SERVICECHAIN_SPECS, data, self.fmt)
spec = self.deserialize(
self.fmt, scs_req.get_response(self.ext_api))
scs_id = spec['servicechain_spec']['id']
return scs_id
def _create_servicechain_node(self, node_type="LOADBALANCERV2",
shared=False):
config = "{}"
data = {'servicechain_node': {'service_type': node_type,
'tenant_id': self._tenant_id if not
shared else 'another-tenant',
'config': config,
'shared': shared}}
scn_req = self.new_create_request(SERVICECHAIN_NODES, data, self.fmt)
node = self.deserialize(self.fmt, scn_req.get_response(self.ext_api))
scn_id = node['servicechain_node']['id']
return scn_id
def _get_object(self, type, id, api, expected_res_status=None):
req = self.new_show_request(type, id, self.fmt)
res = req.get_response(api)
@@ -1064,55 +1030,6 @@ class TestResourceStatusChange(GroupPolicyPluginTestCase):
self._test_status_change_on_list(resource_name, fields=['name'])
class TestPolicyAction(GroupPolicyPluginTestCase):
def test_redirect_value(self):
scs_id = self._create_servicechain_spec(
node_types=['FIREWALL_TRANSPARENT'])
res = self.create_policy_action(action_type='redirect',
action_value=scs_id, shared=True,
expected_res_status=400)
self.assertEqual(
'SharedResourceReferenceError', res['NeutronError']['type'])
res = self.create_policy_action(
action_type='redirect', action_value=scs_id, tenant_id='different',
expected_res_status=404)
self.assertEqual(
'ServiceChainSpecNotFound', res['NeutronError']['type'])
res = self.create_policy_action(
action_type='redirect', action_value=scs_id, tenant_id='different',
expected_res_status=201, is_admin_context=True)
res = self.create_policy_action(
action_type='redirect', action_value=scs_id,
expected_res_status=201)['policy_action']
res = self.update_policy_action(
res['id'], shared=True, expected_res_status=400)
self.assertEqual(
'SharedResourceReferenceError', res['NeutronError']['type'])
scs_id = self._create_servicechain_spec(
node_types=['FIREWALL_TRANSPARENT'], shared=True)
self.create_policy_action(
action_type='redirect', action_value=scs_id, shared=True,
expected_res_status=201)
data = {'servicechain_spec': {'shared': False}}
scs_req = self.new_update_request(
SERVICECHAIN_SPECS, data, scs_id, self.fmt)
res = self.deserialize(
self.fmt, scs_req.get_response(self.ext_api))
self.assertEqual(
'InvalidSharedAttributeUpdate', res['NeutronError']['type'])
def test_redirect_shared_create(self):
scs_id = self._create_servicechain_spec(
node_types=['FIREWALL_TRANSPARENT'], shared=True)
self.create_policy_action(action_type='redirect', action_value=scs_id,
shared=True, expected_res_status=201)
class TestGroupPolicyPluginGroupResources(
GroupPolicyPluginTestCase, tgpdb.TestGroupResources):


@@ -19,8 +19,6 @@ from neutron_lib.plugins import directory
import webob.exc
from gbpservice.neutron.services.grouppolicy import config
from gbpservice.neutron.services.servicechain.plugins.ncp import (
config as sc_cfg)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_grouppolicy_plugin as test_plugin)
@@ -40,11 +38,6 @@ class CommonNeutronBaseTestCase(test_plugin.GroupPolicyPluginTestBase):
config.cfg.CONF.set_override('policy_drivers',
policy_drivers,
group='group_policy')
sc_cfg.cfg.CONF.set_override('node_drivers',
['dummy_driver'],
group='node_composition_plugin')
sc_cfg.cfg.CONF.set_override('node_plumber', 'dummy_plumber',
group='node_composition_plugin')
config.cfg.CONF.set_override('allow_overlapping_ips', True)
super(CommonNeutronBaseTestCase, self).setUp(core_plugin=core_plugin,
l3_plugin=l3_plugin,


@@ -1,349 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.extensions import base as test_extensions_base
from neutron_lib.plugins import constants
from oslo_utils import uuidutils
from webob import exc
from gbpservice.neutron.extensions import servicechain
from gbpservice.neutron.tests.unit import common as cm
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
SERVICE_CHAIN_PLUGIN_BASE_NAME = (
servicechain.ServiceChainPluginBase.__module__ + '.' +
servicechain.ServiceChainPluginBase.__name__)
SERVICECHAIN_URI = 'servicechain'
SERVICECHAIN_NODES_URI = SERVICECHAIN_URI + '/' + 'servicechain_nodes'
SERVICECHAIN_SPECS_URI = SERVICECHAIN_URI + '/' + 'servicechain_specs'
SERVICECHAIN_INSTANCES_URI = SERVICECHAIN_URI + '/' + 'servicechain_instances'
SERVICE_PROFILE_URI = SERVICECHAIN_URI + '/' + 'service_profiles'
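# API-layer CRUD tests for the legacy servicechain extension: requests are
# issued against the URIs above and asserted against a mocked
# ServiceChainPluginBase.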
class ServiceChainExtensionTestCase(test_extensions_base.ExtensionTestCase):
fmt = 'json'
def setUp(self):
super(ServiceChainExtensionTestCase, self).setUp()
plural_mappings = {}
self.setup_extension(
SERVICE_CHAIN_PLUGIN_BASE_NAME, constants.SERVICECHAIN,
servicechain.Servicechain,
SERVICECHAIN_URI, plural_mappings=plural_mappings)
self.instance = self.plugin.return_value
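# self.instance is the mocked plugin; each test below asserts the
# corresponding plugin call and the payload returned through the API.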
def _test_create_servicechain_node(self, data, expected_value,
default_data=None):
if not default_data:
default_data = data
self.instance.create_servicechain_node.return_value = expected_value
res = self.api.post(_get_path(SERVICECHAIN_NODES_URI, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
self.instance.create_servicechain_node.assert_called_once_with(
mock.ANY, servicechain_node=default_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_node', res)
self.assertEqual(expected_value, res['servicechain_node'])
def test_create_servicechain_node_with_defaults(self):
servicechain_node_id = _uuid()
data = {
'servicechain_node': {
'service_profile_id': _uuid(),
'tenant_id': _uuid(),
'config': 'test_config',
'service_type': None,
}
}
default_attrs = (
cm.get_create_servicechain_node_default_attrs_and_prj_id())
default_data = copy.copy(data)
default_data['servicechain_node'].update(default_attrs)
expected_value = dict(default_data['servicechain_node'])
expected_value['id'] = servicechain_node_id
self._test_create_servicechain_node(data, expected_value, default_data)
def test_create_servicechain_node(self):
servicechain_node_id = _uuid()
data = {
'servicechain_node': cm.get_create_servicechain_node_attrs()
}
expected_value = dict(data['servicechain_node'])
expected_value['id'] = servicechain_node_id
self._test_create_servicechain_node(data, expected_value)
def test_list_servicechain_nodes(self):
servicechain_node_id = _uuid()
expected_value = [{'tenant_id': _uuid(), 'id': servicechain_node_id}]
self.instance.get_servicechain_nodes.return_value = expected_value
res = self.api.get(_get_path(SERVICECHAIN_NODES_URI, fmt=self.fmt))
self.instance.get_servicechain_nodes.assert_called_once_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_nodes', res)
self.assertEqual(expected_value, res['servicechain_nodes'])
def test_get_servicechain_node(self):
servicechain_node_id = _uuid()
expected_value = {
'tenant_id': _uuid(), 'id': servicechain_node_id}
self.instance.get_servicechain_node.return_value = expected_value
res = self.api.get(_get_path(SERVICECHAIN_NODES_URI,
id=servicechain_node_id,
fmt=self.fmt))
self.instance.get_servicechain_node.assert_called_once_with(
mock.ANY, servicechain_node_id, fields=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_node', res)
self.assertEqual(expected_value, res['servicechain_node'])
def test_update_servicechain_node(self):
servicechain_node_id = _uuid()
update_data = {
'servicechain_node': cm.get_update_servicechain_node_attrs()
}
expected_value = {'tenant_id': _uuid(), 'id': servicechain_node_id}
self.instance.update_servicechain_node.return_value = expected_value
res = self.api.put(_get_path(SERVICECHAIN_NODES_URI,
id=servicechain_node_id,
fmt=self.fmt),
self.serialize(update_data))
self.instance.update_servicechain_node.assert_called_once_with(
mock.ANY, servicechain_node_id, servicechain_node=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_node', res)
self.assertEqual(expected_value, res['servicechain_node'])
def test_delete_servicechain_node(self):
self._test_entity_delete('servicechain_node')
def _test_create_servicechain_spec(self, data, expected_value,
default_data=None):
if not default_data:
default_data = data
self.instance.create_servicechain_spec.return_value = expected_value
res = self.api.post(_get_path(SERVICECHAIN_SPECS_URI, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
self.instance.create_servicechain_spec.assert_called_once_with(
mock.ANY, servicechain_spec=default_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_spec', res)
self.assertEqual(expected_value, res['servicechain_spec'])
def test_create_servicechain_spec_with_defaults(self):
servicechain_spec_id = _uuid()
data = {
'servicechain_spec': {
'nodes': [_uuid(), _uuid()], 'tenant_id': _uuid()
}
}
default_attrs = (
cm.get_create_servicechain_spec_default_attrs_and_prj_id())
default_data = copy.copy(data)
default_data['servicechain_spec'].update(default_attrs)
expected_value = dict(default_data['servicechain_spec'])
expected_value['id'] = servicechain_spec_id
self._test_create_servicechain_spec(data, expected_value, default_data)
def test_create_servicechain_spec(self):
servicechain_spec_id = _uuid()
data = {
'servicechain_spec': cm.get_create_servicechain_spec_attrs()
}
expected_value = dict(data['servicechain_spec'])
expected_value['id'] = servicechain_spec_id
self._test_create_servicechain_spec(data, expected_value)
def test_list_servicechain_specs(self):
servicechain_spec_id = _uuid()
expected_value = [{'tenant_id': _uuid(), 'id': servicechain_spec_id}]
self.instance.get_servicechain_specs.return_value = expected_value
res = self.api.get(_get_path(SERVICECHAIN_SPECS_URI, fmt=self.fmt))
self.instance.get_servicechain_specs.assert_called_once_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_specs', res)
self.assertEqual(expected_value, res['servicechain_specs'])
def test_get_servicechain_spec(self):
servicechain_spec_id = _uuid()
expected_value = {'tenant_id': _uuid(), 'id': servicechain_spec_id}
self.instance.get_servicechain_spec.return_value = expected_value
res = self.api.get(_get_path(SERVICECHAIN_SPECS_URI,
id=servicechain_spec_id,
fmt=self.fmt))
self.instance.get_servicechain_spec.assert_called_once_with(
mock.ANY, servicechain_spec_id, fields=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_spec', res)
self.assertEqual(expected_value, res['servicechain_spec'])
def test_update_servicechain_spec(self):
servicechain_spec_id = _uuid()
update_data = {
'servicechain_spec': cm.get_update_servicechain_spec_attrs()
}
expected_value = {'tenant_id': _uuid(), 'id': servicechain_spec_id}
self.instance.update_servicechain_spec.return_value = expected_value
res = self.api.put(_get_path(SERVICECHAIN_SPECS_URI,
id=servicechain_spec_id,
fmt=self.fmt),
self.serialize(update_data))
self.instance.update_servicechain_spec.assert_called_once_with(
mock.ANY, servicechain_spec_id, servicechain_spec=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_spec', res)
self.assertEqual(expected_value, res['servicechain_spec'])
def test_delete_servicechain_spec(self):
self._test_entity_delete('servicechain_spec')
def _test_create_servicechain_instance(self, data, expected_value,
default_data=None):
if not default_data:
default_data = data
self.instance.create_servicechain_instance.return_value = (
expected_value)
res = self.api.post(_get_path(SERVICECHAIN_INSTANCES_URI,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
self.instance.create_servicechain_instance.assert_called_once_with(
mock.ANY, servicechain_instance=default_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_instance', res)
self.assertEqual(expected_value, res['servicechain_instance'])
def test_create_servicechain_instance_with_defaults(self):
servicechain_instance_id = _uuid()
data = {
'servicechain_instance': {
'servicechain_specs': [_uuid()],
'tenant_id': _uuid(),
'provider_ptg_id': _uuid(),
'consumer_ptg_id': _uuid(),
'management_ptg_id': _uuid(),
'classifier_id': _uuid(),
}
}
default_attrs = (
cm.get_create_servicechain_instance_default_attrs_and_prj_id())
default_data = copy.copy(data)
default_data['servicechain_instance'].update(default_attrs)
expected_value = dict(default_data['servicechain_instance'])
expected_value['id'] = servicechain_instance_id
self._test_create_servicechain_instance(data, expected_value,
default_data)
def test_create_servicechain_instance(self):
servicechain_instance_id = _uuid()
data = {'servicechain_instance':
cm.get_create_servicechain_instance_attrs()}
expected_value = dict(data['servicechain_instance'])
expected_value['id'] = servicechain_instance_id
self._test_create_servicechain_instance(data, expected_value)
def test_list_servicechain_instances(self):
servicechain_instance_id = _uuid()
expected_value = [{'tenant_id': _uuid(),
'id': servicechain_instance_id}]
self.instance.get_servicechain_instances.return_value = expected_value
res = self.api.get(_get_path(SERVICECHAIN_INSTANCES_URI, fmt=self.fmt))
self.instance.get_servicechain_instances.assert_called_once_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_instances', res)
self.assertEqual(expected_value, res['servicechain_instances'])
def test_get_servicechain_instance(self):
servicechain_instance_id = _uuid()
expected_value = {'tenant_id': _uuid(), 'id': servicechain_instance_id}
self.instance.get_servicechain_instance.return_value = expected_value
res = self.api.get(_get_path(SERVICECHAIN_INSTANCES_URI,
id=servicechain_instance_id,
fmt=self.fmt))
self.instance.get_servicechain_instance.assert_called_once_with(
mock.ANY, servicechain_instance_id, fields=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_instance', res)
self.assertEqual(expected_value, res['servicechain_instance'])
def test_update_servicechain_instance(self):
servicechain_instance_id = _uuid()
update_data = {'servicechain_instance':
cm.get_update_servicechain_instance_attrs()}
expected_value = {'tenant_id': _uuid(), 'id': servicechain_instance_id}
self.instance.update_servicechain_instance.return_value = (
expected_value)
res = self.api.put(_get_path(SERVICECHAIN_INSTANCES_URI,
id=servicechain_instance_id,
fmt=self.fmt),
self.serialize(update_data))
self.instance.update_servicechain_instance.assert_called_once_with(
mock.ANY, servicechain_instance_id,
servicechain_instance=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('servicechain_instance', res)
self.assertEqual(expected_value, res['servicechain_instance'])
def test_delete_servicechain_instance(self):
self._test_entity_delete('servicechain_instance')