diff --git a/gbpservice/common/utils.py b/gbpservice/common/utils.py index d6b859c00..f4ccf99bf 100644 --- a/gbpservice/common/utils.py +++ b/gbpservice/common/utils.py @@ -39,4 +39,4 @@ class DictClass(dict): return self[item] __setattr__ = dict.__setattr__ - __delattr__ = dict.__delattr__ \ No newline at end of file + __delattr__ = dict.__delattr__ diff --git a/gbpservice/neutron/plugins/ml2/drivers/grouppolicy/apic/driver.py b/gbpservice/neutron/plugins/ml2/drivers/grouppolicy/apic/driver.py index 04fd43fa8..1cfa35f79 100644 --- a/gbpservice/neutron/plugins/ml2/drivers/grouppolicy/apic/driver.py +++ b/gbpservice/neutron/plugins/ml2/drivers/grouppolicy/apic/driver.py @@ -13,18 +13,51 @@ # License for the specific language governing permissions and limitations # under the License. -from neutron.common import constants as n_constants +from neutron.extensions import portbindings from neutron import manager from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import mech_agent +from opflexagent import constants as ofcst from oslo_log import log +from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import ( + apic_mapping as amap) + LOG = log.getLogger(__name__) -class APICMechanismGBPDriver(api.MechanismDriver): +class APICMechanismGBPDriver(mech_agent.AgentMechanismDriverBase): + + def __init__(self): + self.vif_details = {portbindings.CAP_PORT_FILTER: False, + portbindings.OVS_HYBRID_PLUG: False} + self.vif_type = portbindings.VIF_TYPE_OVS + super(APICMechanismGBPDriver, self).__init__( + ofcst.AGENT_TYPE_OPFLEX_OVS) + + def try_to_bind_segment_for_agent(self, context, segment, agent): + if self.check_segment_for_agent(segment, agent): + context.set_binding( + segment[api.ID], self.vif_type, self.vif_details) + return True + else: + return False + + def check_segment_for_agent(self, segment, agent): + network_type = segment[api.NETWORK_TYPE] + if network_type == ofcst.TYPE_OPFLEX: + opflex_mappings = 
agent['configurations'].get('opflex_networks') + LOG.debug(_("Checking segment: %(segment)s " + "for mappings: %(mappings)s "), + {'segment': segment, 'mappings': opflex_mappings}) + return ((opflex_mappings is None) or + (segment[api.PHYSICAL_NETWORK] in opflex_mappings)) + else: + return False def initialize(self): + super(APICMechanismGBPDriver, self).initialize() self._apic_gbp = None @property @@ -36,16 +69,30 @@ class APICMechanismGBPDriver(api.MechanismDriver): return self._apic_gbp def create_port_postcommit(self, context): - # DHCP Ports are created implicitly by Neutron, need to inform GBP - if (context.current.get('device_owner') == - n_constants.DEVICE_OWNER_DHCP): - self.apic_gbp.create_dhcp_policy_target_if_needed( - context._plugin_context, context.current) + self.apic_gbp.process_port_added( + context._plugin_context, context.current) def update_port_postcommit(self, context): self.apic_gbp.process_port_changed(context._plugin_context, context.original, context.current) + def delete_port_precommit(self, context): + self.apic_gbp.process_pre_port_deleted(context._plugin_context, + context.current) + + def delete_port_postcommit(self, context): + self.apic_gbp.process_port_deleted(context._plugin_context, + context.current) + def update_subnet_postcommit(self, context): self.apic_gbp.process_subnet_changed(context._plugin_context, context.original, context.current) + + def create_subnet_postcommit(self, context): + if not context.current['name'].startswith(amap.APIC_OWNED): + self.apic_gbp.process_subnet_added(context._plugin_context, + context.current) + + def delete_subnet_postcommit(self, context): + self.apic_gbp.process_subnet_deleted(context._plugin_context, + context.current) diff --git a/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/apic_mapping.py b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/apic_mapping.py index 701b76cf2..ee7939899 100644 --- 
a/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/apic_mapping.py +++ b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/apic_mapping.py @@ -14,38 +14,36 @@ import netaddr from apicapi import apic_manager from keystoneclient.v2_0 import client as keyclient +from neutron.api.v2 import attributes +from neutron.common import constants as n_constants from neutron.common import exceptions as n_exc -from neutron.extensions import providernet as pn -from neutron.extensions import securitygroup as ext_sg +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron import context as nctx +from neutron.extensions import portbindings from neutron import manager from neutron.plugins.ml2.drivers.cisco.apic import apic_model from neutron.plugins.ml2.drivers.cisco.apic import config -from neutron.plugins.ml2 import models +from opflexagent import constants as ofcst +from opflexagent import rpc from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpdb +from gbpservice.neutron.extensions import group_policy as gpolicy from gbpservice.neutron.services.grouppolicy.common import constants as g_const from gbpservice.neutron.services.grouppolicy.common import exceptions as gpexc from gbpservice.neutron.services.grouppolicy.drivers import ( resource_mapping as api) +from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import ( + nova_client as nclient) +from gbpservice.neutron.services.grouppolicy import group_policy_context LOG = logging.getLogger(__name__) -class L2PolicyMultiplePolicyTargetGroupNotSupportedOnApicDriver( - gpexc.GroupPolicyBadRequest): - message = _("An L2 policy can't have multiple policy target groups on " - "APIC GBP driver.") - - -class RedirectActionNotSupportedOnApicDriver(gpexc.GroupPolicyBadRequest): - message = _("Redirect action is currently not supported for APIC GBP " - 
"driver.") - - class PolicyRuleUpdateNotSupportedOnApicDriver(gpexc.GroupPolicyBadRequest): message = _("Policy rule update is not supported on APIC GBP" "driver.") @@ -79,6 +77,26 @@ class SharedAttributeUpdateNotSupportedOnApic(gpexc.GroupPolicyBadRequest): "GBP driver for resource of type %(type)s") +class ExplicitSubnetAssociationNotSupported(gpexc.GroupPolicyBadRequest): + message = _("Explicit subnet association not supported by APIC driver.") + + +class HierarchicalContractsNotSupported(gpexc.GroupPolicyBadRequest): + message = _("Hierarchical contracts not supported by APIC driver.") + +REVERSE_PREFIX = 'reverse-' +SHADOW_PREFIX = 'Shd-' +SERVICE_PREFIX = 'Svc-' +IMPLICIT_PREFIX = 'implicit-' +ANY_PREFIX = 'any-' +PROMISCUOUS_SUFFIX = 'promiscuous' +APIC_OWNED = 'apic_owned_' +PROMISCUOUS_TYPES = [n_constants.DEVICE_OWNER_DHCP, + n_constants.DEVICE_OWNER_LOADBALANCER] +ALLOWING_ACTIONS = [g_const.GP_ACTION_ALLOW, g_const.GP_ACTION_REDIRECT] +REVERTIBLE_PROTOCOLS = [n_constants.PROTO_NAME_TCP.lower()] + + class ApicMappingDriver(api.ResourceMappingDriver): """Apic Mapping driver for Group Policy plugin. 
@@ -96,6 +114,7 @@ class ApicMappingDriver(api.ResourceMappingDriver): apic_config = cfg.CONF.ml2_cisco_apic network_config = { 'vlan_ranges': cfg.CONF.ml2_type_vlan.network_vlan_ranges, + 'vni_ranges': cfg.CONF.ml2_type_vxlan.vni_ranges, 'switch_dict': config.create_switch_dictionary(), 'vpc_dict': config.create_vpc_dictionary(), 'external_network_dict': @@ -114,10 +133,23 @@ class ApicMappingDriver(api.ResourceMappingDriver): def initialize(self): super(ApicMappingDriver, self).initialize() + self._setup_rpc_listeners() + self._setup_rpc() self.apic_manager = ApicMappingDriver.get_apic_manager() self.name_mapper = self.apic_manager.apic_mapper self._gbp_plugin = None + def _setup_rpc_listeners(self): + self.endpoints = [rpc.GBPServerRpcCallback(self)] + self.topic = rpc.TOPIC_OPFLEX + self.conn = n_rpc.create_connection(new=True) + self.conn.create_consumer(self.topic, self.endpoints, + fanout=False) + return self.conn.consume_in_threads() + + def _setup_rpc(self): + self.notifier = rpc.AgentNotifierApi(topics.AGENT) + @property def gbp_plugin(self): if not self._gbp_plugin: @@ -125,64 +157,62 @@ class ApicMappingDriver(api.ResourceMappingDriver): .get("GROUP_POLICY")) return self._gbp_plugin + # RPC Method def get_gbp_details(self, context, **kwargs): - port_id = (kwargs.get('port_id') or - self._core_plugin._device_to_port_id(kwargs['device'])) - port = self._core_plugin.get_port(context, port_id) - # retrieve PTG and network from a given Port - if not kwargs.get('policy_target'): - ptg, network = self._port_to_ptg_network(context, port, - kwargs['host']) - if not ptg: - return - else: - pt = kwargs['policy_target'] - ptg = self.gbp_plugin.get_policy_target_group( - context, pt['policy_target_group_id']) - network = self._l2p_id_to_network(context, ptg['l2_policy_id']) - - return {'port_id': port_id, - 'mac_address': port['mac_address'], - 'ptg_id': ptg['id'], - 'segmentation_id': network[pn.SEGMENTATION_ID], - 'network_type': network[pn.NETWORK_TYPE], - 
'l2_policy_id': ptg['l2_policy_id'], - 'tenant_id': port['tenant_id'], - 'host': port['binding:host_id'], - 'ptg_apic_tentant': (ptg['tenant_id'] if not ptg['shared'] else - apic_manager.TENANT_COMMON) - } - - def create_dhcp_policy_target_if_needed(self, plugin_context, port): - session = plugin_context.session - if (self._port_is_owned(session, port['id'])): - # Nothing to do + port_id = self._core_plugin._device_to_port_id( + kwargs['device']) + port_context = self._core_plugin.get_bound_port_context( + context, port_id, kwargs['host']) + if not port_context: + LOG.warning(_("Device %(device)s requested by agent " + "%(agent_id)s not found in database"), + {'device': port_id, + 'agent_id': kwargs.get('agent_id')}) return - # Retrieve PTG - fixed_ips = port['fixed_ips'] - if fixed_ips: - port_subnet_id = fixed_ips[0]['subnet_id'] - ptg = self._get_ptg_by_subnet(plugin_context, port_subnet_id) - if ptg: - # Create PolicyTarget - attrs = {'policy_target': - {'tenant_id': port['tenant_id'], - 'name': 'dhcp-%s' % ptg['id'], - 'description': _("Implicitly created DHCP policy " - "target"), - 'policy_target_group_id': ptg['id'], - 'port_id': port['id']}} - self.gbp_plugin.create_policy_target(plugin_context, attrs) - sg_id = self._get_default_security_group(plugin_context, - ptg['id'], - port['tenant_id']) - data = {'port': {'security_groups': [sg_id]}} - self._core_plugin.update_port(plugin_context, port['id'], data) + port = port_context.current + + # retrieve PTG from a given Port + ptg = self._port_id_to_ptg(context, port['id']) + l2p = self._network_id_to_l2p(context, port['network_id']) + if not ptg and not l2p: + return + context._plugin = self.gbp_plugin + context._plugin_context = context + + l2_policy_id = l2p['id'] + ptg_tenant = self._tenant_by_sharing_policy(ptg or l2p) + if ptg: + endpoint_group_name = self.name_mapper.policy_target_group( + context, ptg['id']) + else: + endpoint_group_name = self.name_mapper.l2_policy( + context, l2p['id'], 
prefix=SHADOW_PREFIX) + + def is_port_promiscuous(port): + return (port['device_owner'] in PROMISCUOUS_TYPES or + port['name'].endswith(PROMISCUOUS_SUFFIX)) + + details = {'device': kwargs.get('device'), + 'port_id': port_id, + 'mac_address': port['mac_address'], + 'app_profile_name': str( + self.apic_manager.app_profile_name), + 'l2_policy_id': l2_policy_id, + 'tenant_id': port['tenant_id'], + 'host': port[portbindings.HOST_ID], + 'ptg_tenant': str(ptg_tenant), + 'endpoint_group_name': str(endpoint_group_name), + 'promiscuous_mode': is_port_promiscuous(port)} + if port['device_owner'].startswith('compute:') and port['device_id']: + vm = nclient.NovaClient().get_server(port['device_id']) + details['vm-name'] = vm.name if vm else port['device_id'] + return details + + def process_port_added(self, plugin_context, port): + pass def create_policy_action_precommit(self, context): - # TODO(ivar): allow redirect for service chaining - if context.current['action_type'] == g_const.GP_ACTION_REDIRECT: - raise RedirectActionNotSupportedOnApicDriver() + pass def create_policy_rule_precommit(self, context): if ('policy_actions' in context.current and @@ -190,29 +220,48 @@ class ApicMappingDriver(api.ResourceMappingDriver): # TODO(ivar): to be fixed when redirect is supported raise ExactlyOneActionPerRuleIsSupportedOnApicDriver() - def create_policy_rule_postcommit(self, context): + def create_policy_rule_postcommit(self, context, transaction=None): action = context._plugin.get_policy_action( context._plugin_context, context.current['policy_actions'][0]) classifier = context._plugin.get_policy_classifier( context._plugin_context, context.current['policy_classifier_id']) - if action['action_type'] == g_const.GP_ACTION_ALLOW: + if action['action_type'] in ALLOWING_ACTIONS: port_min, port_max = ( gpdb.GroupPolicyMappingDbPlugin._get_min_max_ports_from_range( classifier['port_range'])) - attrs = {'etherT': 'ip', - 'prot': classifier['protocol'].lower()} - if port_min and port_max: - 
attrs['dToPort'] = port_max - attrs['dFromPort'] = port_min + attrs = {'etherT': 'unspecified'} + if classifier['protocol']: + attrs['etherT'] = 'ip' + attrs['prot'] = classifier['protocol'].lower() + if port_min and port_max: + attrs['dToPort'] = port_max + attrs['dFromPort'] = port_min tenant = self._tenant_by_sharing_policy(context.current) policy_rule = self.name_mapper.policy_rule(context, context.current['id']) - self.apic_manager.create_tenant_filter(policy_rule, owner=tenant, - **attrs) + with self.apic_manager.apic.transaction(transaction) as trs: + self.apic_manager.create_tenant_filter( + policy_rule, owner=tenant, transaction=trs, **attrs) + # Also create reverse rule + if attrs.get('prot') in REVERTIBLE_PROTOCOLS: + if attrs['prot'] == n_constants.PROTO_NAME_TCP.lower(): + policy_rule = self.name_mapper.policy_rule( + context, context.current['id'], + prefix=REVERSE_PREFIX) + if attrs.get('dToPort') and attrs.get('dFromPort'): + attrs.pop('dToPort') + attrs.pop('dFromPort') + attrs['sToPort'] = port_max + attrs['sFromPort'] = port_min + attrs['tcpRules'] = 'est' + self.apic_manager.create_tenant_filter( + policy_rule, owner=tenant, transaction=trs, + **attrs) def create_policy_rule_set_precommit(self, context): - pass + if context.current['child_policy_rule_sets']: + raise HierarchicalContractsNotSupported() def create_policy_rule_set_postcommit(self, context): # Create APIC policy_rule_set @@ -229,18 +278,20 @@ class ApicMappingDriver(api.ResourceMappingDriver): context, context.current, rules, transaction=trs) def create_policy_target_postcommit(self, context): - # The path needs to be created at bind time, this will be taken - # care by the GBP ML2 apic driver. 
- super(ApicMappingDriver, self).create_policy_target_postcommit(context) - self._manage_policy_target_port( - context._plugin_context, context.current) + if not context.current['port_id']: + self._use_implicit_port(context) + port = self._core_plugin.get_port(context._plugin_context, + context.current['port_id']) + if self._is_port_bound(port): + self._notify_port_update(context._plugin_context, port['id']) def create_policy_target_group_precommit(self, context): - pass + if context.current['subnets']: + raise ExplicitSubnetAssociationNotSupported() def create_policy_target_group_postcommit(self, context): - super(ApicMappingDriver, self).create_policy_target_group_postcommit( - context) + if not context.current['subnets']: + self._use_implicit_subnet(context) tenant = self._tenant_by_sharing_policy(context.current) l2_policy = self.name_mapper.l2_policy(context, context.current['l2_policy_id']) @@ -253,18 +304,17 @@ class ApicMappingDriver(api.ResourceMappingDriver): self.apic_manager.ensure_epg_created(tenant, epg, bd_owner=bd_owner, bd_name=l2_policy) - subnets = self._subnet_ids_to_objects(context._plugin_context, - context.current['subnets']) - self._manage_ptg_subnets(context._plugin_context, context.current, - subnets, [], transaction=trs) - self._manage_ptg_policy_rule_sets( - context._plugin_context, context.current, - context.current['provided_policy_rule_sets'], - context.current['consumed_policy_rule_sets'], [], [], - transaction=trs) - self._update_default_security_group( - context._plugin_context, context.current['id'], - context.current['tenant_id'], context.current['subnets']) + + l2p = context._plugin.get_l2_policy( + context._plugin_context, context.current['l2_policy_id']) + self._configure_epg_service_contract( + context, context.current, l2p, epg, transaction=trs) + self._configure_epg_implicit_contract( + context, context.current, l2p, epg, transaction=trs) + + self._manage_ptg_policy_rule_sets( + context, 
context.current['provided_policy_rule_sets'], + context.current['consumed_policy_rule_sets'], [], []) def create_l2_policy_precommit(self, context): self._reject_non_shared_net_on_shared_l2p(context) @@ -282,9 +332,25 @@ class ApicMappingDriver(api.ResourceMappingDriver): l3_policy_object = context._plugin.get_l3_policy( context._plugin_context, context.current['l3_policy_id']) ctx_owner = self._tenant_by_sharing_policy(l3_policy_object) - self.apic_manager.ensure_bd_created_on_apic(tenant, l2_policy, - ctx_owner=ctx_owner, - ctx_name=l3_policy) + with self.apic_manager.apic.transaction(None) as trs: + self.apic_manager.ensure_bd_created_on_apic( + tenant, l2_policy, ctx_owner=ctx_owner, ctx_name=l3_policy, + transaction=trs) + # Create neutron port EPG + self._configure_shadow_epg(context, context.current, l2_policy, + transaction=trs) + self._configure_implicit_contract(context, context.current, + transaction=trs) + # Add existing subnets + net_id = context.current['network_id'] + subnets = self._core_plugin.get_subnets(context._plugin_context, + {'network_id': [net_id]}) + self._manage_l2p_subnets( + context._plugin_context, context.current['id'], subnets, [], + transaction=trs) + + def update_l2_policy_postcommit(self, context): + pass def create_l3_policy_precommit(self, context): self._check_l3p_es(context) @@ -303,18 +369,30 @@ class ApicMappingDriver(api.ResourceMappingDriver): self._plug_l3p_to_es(context, es) def delete_policy_rule_postcommit(self, context): - # TODO(ivar): delete Contract subject entries to avoid reference leak + for prs in context._plugin.get_policy_rule_sets( + context._plugin_context, + filters={'id': context.current['policy_rule_sets']}): + self._remove_policy_rule_set_rules(context, prs, [context.current]) + self._delete_policy_rule_from_apic(context) + + def _delete_policy_rule_from_apic(self, context, transaction=None): tenant = self._tenant_by_sharing_policy(context.current) policy_rule = self.name_mapper.policy_rule(context, 
context.current['id']) - self.apic_manager.delete_tenant_filter(policy_rule, owner=tenant) + with self.apic_manager.apic.transaction(transaction) as trs: + self.apic_manager.delete_tenant_filter(policy_rule, owner=tenant, + transaction=trs) + # Delete policy reverse rule + policy_rule = self.name_mapper.policy_rule( + context, context.current['id'], prefix=REVERSE_PREFIX) + self.apic_manager.delete_tenant_filter(policy_rule, owner=tenant, + transaction=trs) def delete_policy_rule_set_precommit(self, context): # Intercept Parent Call pass def delete_policy_rule_set_postcommit(self, context): - # TODO(ivar): disassociate PTGs to avoid reference leak tenant = self._tenant_by_sharing_policy(context.current) contract = self.name_mapper.policy_rule_set(context, context.current['id']) @@ -322,26 +400,23 @@ class ApicMappingDriver(api.ResourceMappingDriver): def delete_policy_target_postcommit(self, context): try: - port = self._core_plugin.get_port(context._plugin_context, - context.current['port_id']) + if context.current['port_id']: + port = self._core_plugin.get_port(context._plugin_context, + context.current['port_id']) + # Delete Neutron's port + port_id = context.current['port_id'] + self._cleanup_port(context._plugin_context, port_id) + # Notify the agent. 
If the port has been deleted by the + # parent method the notification will not be done + self._notify_port_update(context._plugin_context, port['id']) except n_exc.PortNotFound: LOG.warn(_("Port %s is missing") % context.current['port_id']) return - if port['binding:host_id']: - self.process_path_deletion(context._plugin_context, port, - policy_target=context.current) - # Delete Neutron's port - super(ApicMappingDriver, self).delete_policy_target_postcommit(context) + def delete_policy_target_group_precommit(self, context): + pass def delete_policy_target_group_postcommit(self, context): - if context.current['subnets']: - subnets = self._subnet_ids_to_objects(context._plugin_context, - context.current['subnets']) - self._manage_ptg_subnets(context._plugin_context, context.current, - [], subnets) - for subnet_id in context.current['subnets']: - self._cleanup_subnet(context._plugin_context, subnet_id, None) tenant = self._tenant_by_sharing_policy(context.current) ptg = self.name_mapper.policy_target_group(context, context.current['id']) @@ -353,7 +428,13 @@ class ApicMappingDriver(api.ResourceMappingDriver): tenant = self._tenant_by_sharing_policy(context.current) l2_policy = self.name_mapper.l2_policy(context, context.current['id']) - self.apic_manager.delete_bd_on_apic(tenant, l2_policy) + with self.apic_manager.apic.transaction(None) as trs: + self.apic_manager.delete_bd_on_apic( + tenant, l2_policy, transaction=trs) + # Delete neutron port EPG + self._delete_shadow_epg(context, context.current, transaction=trs) + self._delete_implicit_contract(context, context.current, + transaction=trs) def delete_l3_policy_postcommit(self, context): tenant = self._tenant_by_sharing_policy(context.current) @@ -371,19 +452,68 @@ class ApicMappingDriver(api.ResourceMappingDriver): def update_policy_rule_set_precommit(self, context): self._reject_shared_update(context, 'policy_rule_set') + self._reject_multiple_redirects_in_prs(context) + if 
context.current['child_policy_rule_sets']: + raise HierarchicalContractsNotSupported() + # If a redirect action is added (from 0 to one) we have to validate + # the providing and consuming PTGs + old_red_count = self._multiple_pr_redirect_action_number( + context._plugin_context.session, context.original['policy_rules']) + new_red_count = self._multiple_pr_redirect_action_number( + context._plugin_context.session, context.current['policy_rules']) + if new_red_count > old_red_count: + self._validate_new_prs_redirect(context, context.current) + + def update_policy_rule_set_postcommit(self, context): + # Update policy_rule_set rules + old_rules = set(context.original['policy_rules']) + new_rules = set(context.current['policy_rules']) + to_add = context._plugin.get_policy_rules( + context._plugin_context, {'id': new_rules - old_rules}) + to_remove = context._plugin.get_policy_rules( + context._plugin_context, {'id': old_rules - new_rules}) + self._remove_policy_rule_set_rules(context, context.current, to_remove) + self._apply_policy_rule_set_rules(context, context.current, to_add) + + def update_policy_target_precommit(self, context): + if (context.original['policy_target_group_id'] != + context.current['policy_target_group_id']): + if context.current['policy_target_group_id']: + self._validate_pt_port_subnets(context) def update_policy_target_postcommit(self, context): - # TODO(ivar): redo binding procedure if the PTG is modified, - # not doable unless driver extension framework is in place - pass + if (context.original['policy_target_group_id'] != + context.current['policy_target_group_id']): + self._notify_port_update(context._plugin_context, + context.current['port_id']) def update_policy_rule_precommit(self, context): - # TODO(ivar): add support for action update on policy rules - raise PolicyRuleUpdateNotSupportedOnApicDriver() + self._reject_multiple_redirects_in_rule(context) + old_redirect = self._get_redirect_action(context, context.original) + new_redirect = 
self._get_redirect_action(context, context.current) + if not old_redirect and new_redirect: + # If redirect action is added, check that there's no contract that + # already has a redirect action + for prs in context._plugin.get_policy_rule_sets( + context._plugin_context, + {'id': context.current['policy_rule_sets']}): + # Make sure the PRS can have a new redirect action + self._validate_new_prs_redirect(context, prs) + + def update_policy_rule_postcommit(self, context): + self._update_policy_rule_on_apic(context) + + def update_policy_action_postcommit(self, context): + pass + + def _update_policy_rule_on_apic(self, context): + self._delete_policy_rule_from_apic(context, transaction=None) + # The following only creates the APIC reference + self.create_policy_rule_postcommit(context, transaction=None) def update_policy_target_group_precommit(self, context): - if set(context.original['subnets']) - set(context.current['subnets']): - raise gpexc.PolicyTargetGroupSubnetRemovalNotSupported() + if set(context.original['subnets']) != set(context.current['subnets']): + raise ExplicitSubnetAssociationNotSupported() self._reject_shared_update(context, 'policy_target_group') def update_policy_target_group_postcommit(self, context): @@ -410,28 +540,10 @@ class ApicMappingDriver(api.ResourceMappingDriver): set(orig_consumed_policy_rule_sets) - set( curr_consumed_policy_rule_sets)) - orig_subnets = context.original['subnets'] - curr_subnets = context.current['subnets'] - new_subnets = list(set(curr_subnets) - set(orig_subnets)) - removed_subnets = list(set(orig_subnets) - set(curr_subnets)) - - with self.apic_manager.apic.transaction(None) as trs: - self._manage_ptg_policy_rule_sets( - context._plugin_context, context.current, - new_provided_policy_rule_sets, new_consumed_policy_rule_sets, - removed_provided_policy_rule_sets, - removed_consumed_policy_rule_sets, transaction=trs) - - new_subnets = self._subnet_ids_to_objects( - context._plugin_context, new_subnets) - 
removed_subnets = self._subnet_ids_to_objects( - context._plugin_context, removed_subnets) - - self._manage_ptg_subnets(context._plugin_context, context.current, - new_subnets, removed_subnets) - self._update_default_security_group( - context._plugin_context, context.current['id'], - context.current['tenant_id'], subnets=new_subnets) + self._manage_ptg_policy_rule_sets( + context, new_provided_policy_rule_sets, + new_consumed_policy_rule_sets, removed_provided_policy_rule_sets, + removed_consumed_policy_rule_sets) def update_l3_policy_precommit(self, context): self._reject_shared_update(context, 'l3_policy') @@ -464,6 +576,43 @@ class ApicMappingDriver(api.ResourceMappingDriver): for es in removed_ess: self._unplug_l3p_from_es(context, es) + def create_policy_classifier_precommit(self, context): + pass + + def create_policy_classifier_postcommit(self, context): + pass + + def update_policy_classifier_precommit(self, context): + pass + + def update_policy_classifier_postcommit(self, context): + admin_context = nctx.get_admin_context() + if not context.current['policy_rules']: + return + rules = context._plugin.get_policy_rules( + admin_context, + filters={'id': context.current['policy_rules']}) + # Rewrite the rule on the APIC + for rule in rules: + rule_context = group_policy_context.PolicyRuleContext( + context._plugin, context._plugin_context, rule) + self._update_policy_rule_on_apic(rule_context) + # If direction or protocol changed, the contracts should be updated + o_dir = context.original['direction'] + c_dir = context.current['direction'] + o_prot = context.original['protocol'] + c_prot = context.current['protocol'] + # TODO(ivar): Optimize by aggregating on PRS ID + if ((o_dir != c_dir) or + ((o_prot in REVERTIBLE_PROTOCOLS) != + (c_prot in REVERTIBLE_PROTOCOLS))): + for prs in context._plugin.get_policy_rule_sets( + admin_context, + filters={'id': rule['policy_rule_sets']}): + self._remove_policy_rule_set_rules( + context, prs, [(rule, context.original)]) 
+ self._apply_policy_rule_set_rules(context, prs, [rule]) + def create_external_segment_precommit(self, context): if context.current['port_address_translation']: raise PATNotSupportedByApicDriver() @@ -612,35 +761,40 @@ class ApicMappingDriver(api.ResourceMappingDriver): def process_subnet_changed(self, context, old, new): if old['gateway_ip'] != new['gateway_ip']: - ptg = self._subnet_to_ptg(context, new['id']) - if ptg: + l2p = self._network_id_to_l2p(context, new['network_id']) + if l2p: # Is GBP owned, reflect on APIC - self._manage_ptg_subnets(context, ptg, [new], [old]) + self._manage_l2p_subnets(context, l2p['id'], [new], [old]) + + def process_subnet_added(self, context, subnet): + l2p = self._network_id_to_l2p(context, subnet['network_id']) + if l2p: + self._sync_epg_subnets(context, l2p) + self._manage_l2p_subnets(context, l2p['id'], [subnet], []) + + def process_subnet_deleted(self, context, subnet): + l2p = self._network_id_to_l2p(context, subnet['network_id']) + if l2p: + self._manage_l2p_subnets(context, l2p['id'], [], [subnet]) def process_port_changed(self, context, old, new): - # Port's EP can't change unless EP is deleted/created, therefore the - # binding will mostly be the same except for the host - if old['binding:host_id'] != new['binding:host_id']: - pt = self._port_id_to_pt(context, new['id']) - if pt: - if old['binding:host_id']: - self.process_path_deletion(context, old) - self._manage_policy_target_port(context, pt) + pass - def process_path_deletion(self, context, port, policy_target=None): - port_details = self.get_gbp_details( - context, port_id=port['id'], host=port['binding:host_id'], - policy_target=policy_target) - self._delete_path_if_last(context, port_details) + def process_pre_port_deleted(self, context, port): + pt = self._port_id_to_pt(context, port['id']) + if pt: + context.policy_target_id = pt['id'] + + def process_port_deleted(self, context, port): + try: + self.gbp_plugin.delete_policy_target( + context, 
context.policy_target_id) + except AttributeError: + pass def _apply_policy_rule_set_rules( self, context, policy_rule_set, policy_rules, transaction=None): - # TODO(ivar): refactor parent to avoid code duplication - if policy_rule_set['parent_id']: - parent = context._plugin.get_policy_rule_set( - context._plugin_context, policy_rule_set['parent_id']) - policy_rules = policy_rules & set(parent['policy_rules']) - # Don't add rules unallowed by the parent + # TODO(ivar): parent contract filtering when supported self._manage_policy_rule_set_rules( context, policy_rule_set, policy_rules, transaction=transaction) @@ -652,19 +806,26 @@ class ApicMappingDriver(api.ResourceMappingDriver): def _manage_policy_rule_set_rules( self, context, policy_rule_set, policy_rules, unset=False, - transaction=None): + transaction=None, classifier=None): # REVISIT(ivar): figure out what should be moved in apicapi instead if policy_rules: tenant = self._tenant_by_sharing_policy(policy_rule_set) - contract = self.name_mapper.policy_rule_set(context, - context.current['id']) + contract = self.name_mapper.policy_rule_set( + context, policy_rule_set['id']) in_dir = [g_const.GP_DIRECTION_BI, g_const.GP_DIRECTION_IN] out_dir = [g_const.GP_DIRECTION_BI, g_const.GP_DIRECTION_OUT] for rule in policy_rules: + if isinstance(rule, tuple): + classifier = rule[1] + rule = rule[0] + else: + classifier = context._plugin.get_policy_classifier( + context._plugin_context, + rule['policy_classifier_id']) policy_rule = self.name_mapper.policy_rule(context, rule['id']) + reverse_policy_rule = self.name_mapper.policy_rule( + context, rule['id'], prefix=REVERSE_PREFIX) rule_owner = self._tenant_by_sharing_policy(rule) - classifier = context._plugin.get_policy_classifier( - context._plugin_context, rule['policy_classifier_id']) with self.apic_manager.apic.transaction(transaction) as trs: if classifier['direction'] in in_dir: # PRS and subject are the same thing in this case @@ -672,73 +833,75 @@ class 
ApicMappingDriver(api.ResourceMappingDriver): contract, contract, policy_rule, owner=tenant, transaction=trs, unset=unset, rule_owner=rule_owner) + if (classifier['protocol'].lower() in + REVERTIBLE_PROTOCOLS): + (self.apic_manager. + manage_contract_subject_out_filter( + contract, contract, reverse_policy_rule, + owner=tenant, transaction=trs, unset=unset, + rule_owner=rule_owner)) if classifier['direction'] in out_dir: # PRS and subject are the same thing in this case self.apic_manager.manage_contract_subject_out_filter( contract, contract, policy_rule, owner=tenant, transaction=trs, unset=unset, rule_owner=rule_owner) - - @lockutils.synchronized('apic-portlock') - def _manage_policy_target_port(self, plugin_context, pt): - port = self._core_plugin.get_port(plugin_context, pt['port_id']) - if port.get('binding:host_id'): - port_details = self.get_gbp_details( - plugin_context, port_id=port['id'], - host=port['binding:host_id']) - if port_details: - # TODO(ivar): change APICAPI to not expect a resource context - plugin_context._plugin = self.gbp_plugin - plugin_context._plugin_context = plugin_context - ptg_object = self.gbp_plugin.get_policy_target_group( - plugin_context, port_details['ptg_id']) - tenant_id = self._tenant_by_sharing_policy(ptg_object) - epg = self.name_mapper.policy_target_group( - plugin_context, port_details['ptg_id']) - bd = self.name_mapper.l2_policy( - plugin_context, port_details['l2_policy_id']) - seg = port_details['segmentation_id'] - # Create a static path attachment for the host/epg/switchport - with self.apic_manager.apic.transaction() as trs: - self.apic_manager.ensure_path_created_for_port( - tenant_id, epg, port['binding:host_id'], seg, - bd_name=bd, - transaction=trs) + if (classifier['protocol'].lower() in + REVERTIBLE_PROTOCOLS): + (self.apic_manager. 
+ manage_contract_subject_in_filter( + contract, contract, reverse_policy_rule, + owner=tenant, transaction=trs, unset=unset, + rule_owner=rule_owner)) def _manage_ptg_policy_rule_sets( - self, plugin_context, ptg, added_provided, added_consumed, + self, ptg_context, added_provided, added_consumed, removed_provided, removed_consumed, transaction=None): + context = ptg_context + plugin_context = context._plugin_context + ptg = context.current + ptg_params = [] + # TODO(ivar): change APICAPI to not expect a resource context plugin_context._plugin = self.gbp_plugin plugin_context._plugin_context = plugin_context mapped_tenant = self._tenant_by_sharing_policy(ptg) - mapped_ptg = self.name_mapper.policy_target_group(plugin_context, - ptg['id']) + mapped_ptg = self.name_mapper.policy_target_group( + plugin_context, ptg['id']) + ptg_params.append((mapped_tenant, mapped_ptg)) provided = [added_provided, removed_provided] consumed = [added_consumed, removed_consumed] methods = [self.apic_manager.set_contract_for_epg, self.apic_manager.unset_contract_for_epg] - with self.apic_manager.apic.transaction(transaction) as trs: - for x in xrange(len(provided)): - for c in self.gbp_plugin.get_policy_rule_sets( - plugin_context, filters={'id': provided[x]}): - c_owner = self._tenant_by_sharing_policy(c) - c = self.name_mapper.policy_rule_set(plugin_context, - c['id']) - methods[x](mapped_tenant, mapped_ptg, c, provider=True, - contract_owner=c_owner, transaction=trs) - for x in xrange(len(consumed)): - for c in self.gbp_plugin.get_policy_rule_sets( - plugin_context, filters={'id': consumed[x]}): - c_owner = self._tenant_by_sharing_policy(c) - c = self.name_mapper.policy_rule_set(plugin_context, - c['id']) - methods[x](mapped_tenant, mapped_ptg, c, provider=False, - contract_owner=c_owner, transaction=trs) + + for x in xrange(len(provided)): + for c in self.gbp_plugin.get_policy_rule_sets( + plugin_context, filters={'id': provided[x]}): + c_owner = self._tenant_by_sharing_policy(c) + 
c = self.name_mapper.policy_rule_set(plugin_context, + c['id']) + for params in ptg_params: + methods[x](params[0], params[1], c, provider=True, + contract_owner=c_owner, transaction=None) + for x in xrange(len(consumed)): + for c in self.gbp_plugin.get_policy_rule_sets( + plugin_context, filters={'id': consumed[x]}): + c_owner = self._tenant_by_sharing_policy(c) + c = self.name_mapper.policy_rule_set(plugin_context, + c['id']) + for params in ptg_params: + methods[x](params[0], params[1], c, provider=False, + contract_owner=c_owner, transaction=None) def _manage_ep_policy_rule_sets( self, plugin_context, es, ep, added_provided, added_consumed, removed_provided, removed_consumed, transaction=None): + + ext_info = self.apic_manager.ext_net_dict.get(es['name']) + if not ext_info: + LOG.warn(_("External Segment %s is not managed by APIC " + "mapping driver.") % es['id']) + return plugin_context._plugin = self.gbp_plugin plugin_context._plugin_context = plugin_context mapped_tenant = self._tenant_by_sharing_policy(es) @@ -764,16 +927,15 @@ class ApicMappingDriver(api.ResourceMappingDriver): owner=mapped_tenant, provided=False, transaction=trs) - def _manage_ptg_subnets(self, plugin_context, ptg, added_subnets, + def _manage_l2p_subnets(self, plugin_context, l2p_id, added_subnets, removed_subnets, transaction=None): # TODO(ivar): change APICAPI to not expect a resource context plugin_context._plugin = self.gbp_plugin plugin_context._plugin_context = plugin_context l2_policy_object = self.gbp_plugin.get_l2_policy( - plugin_context, ptg['l2_policy_id']) + plugin_context, l2p_id) mapped_tenant = self._tenant_by_sharing_policy(l2_policy_object) - mapped_l2p = self.name_mapper.l2_policy(plugin_context, - ptg['l2_policy_id']) + mapped_l2p = self.name_mapper.l2_policy(plugin_context, l2p_id) subnets = [added_subnets, removed_subnets] methods = [self.apic_manager.ensure_subnet_created_on_apic, self.apic_manager.ensure_subnet_deleted_on_apic] @@ -783,51 +945,6 @@ class 
ApicMappingDriver(api.ResourceMappingDriver): methods[x](mapped_tenant, mapped_l2p, self._gateway_ip(s), transaction=trs) - def _get_active_path_count(self, plugin_context, port_info): - return (plugin_context.session.query(models.PortBindingLevel). - join(models.NetworkSegment). - filter(models.PortBindingLevel.host == port_info['host']). - filter(models.NetworkSegment.segmentation_id == port_info[ - 'segmentation_id']). - filter(models.PortBindingLevel.port_id != port_info['port_id']). - count()) - - @lockutils.synchronized('apic-portlock') - def _delete_port_path(self, context, atenant_id, ptg, port_info): - if not self._get_active_path_count(context, port_info): - self.apic_manager.ensure_path_deleted_for_port( - atenant_id, ptg, port_info['host']) - - def _delete_path_if_last(self, context, port_info): - if not self._get_active_path_count(context, port_info): - # TODO(ivar): change APICAPI to not expect a resource context - context._plugin = self.gbp_plugin - context._plugin_context = context - ptg_object = self.gbp_plugin.get_policy_target_group( - context, port_info['ptg_id']) - atenant_id = self._tenant_by_sharing_policy(ptg_object) - epg = self.name_mapper.policy_target_group(context, - port_info['ptg_id']) - self._delete_port_path(context, atenant_id, epg, port_info) - - def _get_default_security_group(self, context, ptg_id, tenant_id): - # Default SG in APIC mapping is per tenant, and allows all the traffic - # since the contracts will be enforced by ACI and not via SG - filters = {'name': ['gbp_apic_default'], 'tenant_id': [tenant_id]} - default_group = self._core_plugin.get_security_groups( - context, filters) - if not default_group: - attrs = {'name': 'gbp_apic_default', 'tenant_id': tenant_id, - 'description': 'default apic sg'} - ret = self._create_sg(context, attrs) - for ethertype in ext_sg.sg_supported_ethertypes: - for direction in ['ingress', 'egress']: - self._sg_rule(context, tenant_id, ret['id'], direction, - ethertype=ethertype) - return 
ret['id'] - else: - return default_group[0]['id'] - def _update_default_security_group(self, plugin_context, ptg_id, tenant_id, subnets=None): pass @@ -846,8 +963,8 @@ class ApicMappingDriver(api.ResourceMappingDriver): return self._core_plugin.get_subnets(plugin_context, filters={'id': ids}) - def _port_to_ptg_network(self, context, port, host=None): - ptg = self._port_id_to_ptg(context, port['id']) + def _port_to_ptg_network(self, context, port_id): + ptg = self._port_id_to_ptg(context, port_id) if not ptg: # Not GBP port return None, None @@ -875,7 +992,9 @@ class ApicMappingDriver(api.ResourceMappingDriver): def _network_id_to_l2p(self, context, network_id): l2ps = self.gbp_plugin.get_l2_policies( context, filters={'network_id': [network_id]}) - return l2ps[0] if l2ps else None + for l2p in l2ps: + if l2p['network_id'] == network_id: + return l2p def _subnet_to_ptg(self, context, subnet_id): ptg = (context.session.query(gpdb.PolicyTargetGroupMapping). @@ -898,8 +1017,13 @@ class ApicMappingDriver(api.ResourceMappingDriver): "driver.") % es['id']) return ip = external_segments[es['id']] - ip = ip[0] if (ip and ip[0]) else ext_info.get('cidr_exposed', - '/').split('/')[0] + if ip and ip[0]: + ip = ip[0] + exposed = ip + '/' + es['cidr'].split('/')[1] + else: + ip = ext_info.get('cidr_exposed', '/').split('/')[0] + exposed = ext_info.get('cidr_exposed') + if not ip: raise NoAddressConfiguredOnExternalSegment( l3p_id=context.current['id'], es_id=es['id']) @@ -920,8 +1044,8 @@ class ApicMappingDriver(api.ResourceMappingDriver): transaction=trs) self.apic_manager.ensure_logical_node_profile_created( es_name, switch, module, sport, encap, - ip, owner=es_tenant, router_id=router_id, - transaction=trs) + exposed, owner=es_tenant, + router_id=router_id, transaction=trs) for route in es['external_routes']: self.apic_manager.ensure_static_route_created( es_name, switch, route['nexthop'] or default_gateway, @@ -1017,3 +1141,310 @@ class 
ApicMappingDriver(api.ResourceMappingDriver): return self.name_mapper.tenant(None, object['tenant_id']) else: return apic_manager.TENANT_COMMON + + def _notify_port_update(self, plugin_context, port_id): + try: + port = self._core_plugin.get_port(plugin_context, port_id) + if self._is_port_bound(port): + self.notifier.port_update(plugin_context, port) + except n_exc.PortNotFound: + # Notification not needed + pass + + def _get_port_network_type(self, context, port): + try: + network = self._core_plugin.get_network(context, + port['network_id']) + return network['provider:network_type'] + except n_exc.NetworkNotFound: + pass + + def _is_apic_network_type(self, context, port): + return (self._get_port_network_type(context, port) == + ofcst.TYPE_OPFLEX) + + def _is_port_bound(self, port): + return port.get(portbindings.VIF_TYPE) not in [ + portbindings.VIF_TYPE_UNBOUND, + portbindings.VIF_TYPE_BINDING_FAILED] + + def _use_implicit_subnet(self, context, force_add=False): + """Implicit subnet for APIC driver. + + The first PTG of a given BD will allocate a new subnet from the L3P. + Any subsequent PTG in the same BD will use the same subnet. + More subnets will be allocated whenever the existing ones go out of + addresses. 
+ """ + l2p_id = context.current['l2_policy_id'] + with lockutils.lock(l2p_id, external=True): + subs = self._get_l2p_subnets(context._plugin_context, l2p_id) + subs = set([x['id'] for x in subs]) + added = None + if not subs or force_add: + added = self._internal_use_implicit_subnet(context) + subs.add(added['id']) + context.add_subnets(subs - set(context.current['subnets'])) + if added: + self.process_subnet_added(context._plugin_context, added) + + def _internal_use_implicit_subnet(self, context): + l2p_id = context.current['l2_policy_id'] + l2p = context._plugin.get_l2_policy(context._plugin_context, l2p_id) + l3p_id = l2p['l3_policy_id'] + l3p = context._plugin.get_l3_policy(context._plugin_context, l3p_id) + pool = netaddr.IPNetwork(l3p['ip_pool']) + + admin_context = nctx.get_admin_context() + l2ps = context._plugin.get_l2_policies( + admin_context, filters={'l3_policy_id': [l3p['id']]}) + ptgs = context._plugin.get_policy_target_groups( + admin_context, filters={'l2_policy_id': [x['id'] for x in l2ps]}) + subnets = [] + for ptg in ptgs: + subnets.extend(ptg['subnets']) + subnets = self._core_plugin.get_subnets(admin_context, + filters={'id': subnets}) + for cidr in pool.subnet(l3p['subnet_prefix_length']): + if not self._validate_subnet_overlap_for_l3p(subnets, + cidr.__str__()): + continue + try: + attrs = {'tenant_id': context.current['tenant_id'], + 'name': APIC_OWNED + l2p['name'], + 'network_id': l2p['network_id'], + 'ip_version': l3p['ip_version'], + 'cidr': cidr.__str__(), + 'enable_dhcp': True, + 'gateway_ip': attributes.ATTR_NOT_SPECIFIED, + 'allocation_pools': attributes.ATTR_NOT_SPECIFIED, + 'dns_nameservers': attributes.ATTR_NOT_SPECIFIED, + 'host_routes': attributes.ATTR_NOT_SPECIFIED} + subnet = self._create_subnet(context._plugin_context, attrs) + return subnet + except n_exc.BadRequest: + # This is expected (CIDR overlap) until we have a + # proper subnet allocation algorithm. We ignore the + # exception and repeat with the next CIDR. 
+ pass + raise gpexc.NoSubnetAvailable() + + def _sync_epg_subnets(self, plugin_context, l2p): + l2p_subnets = [x['id'] for x in + self._get_l2p_subnets(plugin_context, l2p['id'])] + epgs = self.gbp_plugin.get_policy_target_groups( + nctx.get_admin_context(), {'l2_policy_id': [l2p['id']]}) + for sub in l2p_subnets: + # Add to EPG + for epg in epgs: + if sub not in epg['subnets']: + try: + (self.gbp_plugin. + _add_subnet_to_policy_target_group( + nctx.get_admin_context(), epg['id'], sub)) + except gpolicy.PolicyTargetGroupNotFound as e: + LOG.warn(e) + + def _get_l2p_subnets(self, plugin_context, l2p_id): + l2p = self.gbp_plugin.get_l2_policy(plugin_context, l2p_id) + return self._core_plugin.get_subnets( + plugin_context, {'network_id': [l2p['network_id']]}) + + def _configure_implicit_contract(self, context, l2p, transaction=None): + with self.apic_manager.apic.transaction(transaction) as trs: + tenant = self._tenant_by_sharing_policy(l2p) + # Create Service contract + contract = self.name_mapper.l2_policy( + context, l2p['id'], prefix=IMPLICIT_PREFIX) + self.apic_manager.create_contract( + contract, owner=tenant, transaction=trs) + + # Create ARP filter/subject + attrs = {'etherT': 'arp'} + self._associate_service_filter(tenant, contract, 'arp', + 'arp', transaction=trs, **attrs) + + def _configure_shadow_epg(self, context, l2p, bd_name, transaction=None): + with self.apic_manager.apic.transaction(transaction) as trs: + tenant = self._tenant_by_sharing_policy(l2p) + shadow_epg = self.name_mapper.l2_policy( + context, l2p['id'], prefix=SHADOW_PREFIX) + self.apic_manager.ensure_epg_created( + tenant, shadow_epg, bd_owner=tenant, bd_name=bd_name, + transaction=trs) + + # Create Service contract + contract = self.name_mapper.l2_policy( + context, l2p['id'], prefix=SERVICE_PREFIX) + self.apic_manager.create_contract( + contract, owner=tenant, transaction=trs) + + # Shadow EPG provides this contract + self.apic_manager.set_contract_for_epg( + tenant, shadow_epg, 
contract, provider=True, + contract_owner=tenant, transaction=trs) + + # Create DNS filter/subject + attrs = {'etherT': 'ip', + 'prot': 'udp', + 'dToPort': 'dns', + 'dFromPort': 'dns'} + self._associate_service_filter(tenant, contract, 'dns', + 'dns', transaction=trs, **attrs) + attrs = {'etherT': 'ip', + 'prot': 'udp', + 'sToPort': 'dns', + 'sFromPort': 'dns'} + self._associate_service_filter(tenant, contract, 'dns', + 'r-dns', transaction=trs, **attrs) + + # Create HTTP filter/subject + attrs = {'etherT': 'ip', + 'prot': 'tcp', + 'dToPort': 80, + 'dFromPort': 80} + self._associate_service_filter(tenant, contract, 'http', + 'http', transaction=trs, **attrs) + attrs = {'etherT': 'ip', + 'prot': 'tcp', + 'sToPort': 80, + 'sFromPort': 80} + self._associate_service_filter(tenant, contract, 'http', + 'r-http', transaction=trs, **attrs) + + attrs = {'etherT': 'ip', + 'prot': 'icmp'} + self._associate_service_filter(tenant, contract, 'icmp', + 'icmp', transaction=trs, **attrs) + + # Create DHCP filter/subject + attrs = {'etherT': 'ip', + 'prot': 'udp', + 'dToPort': 68, + 'dFromPort': 68, + 'sToPort': 67, + 'sFromPort': 67} + self._associate_service_filter(tenant, contract, 'dhcp', + 'dhcp', transaction=trs, **attrs) + attrs = {'etherT': 'ip', + 'prot': 'udp', + 'dToPort': 67, + 'dFromPort': 67, + 'sToPort': 68, + 'sFromPort': 68} + self._associate_service_filter(tenant, contract, 'dhcp', + 'r-dhcp', transaction=trs, **attrs) + + # Create ARP filter/subject + attrs = {'etherT': 'arp'} + self._associate_service_filter(tenant, contract, 'arp', + 'arp', transaction=trs, **attrs) + + contract = self.name_mapper.l2_policy( + context, l2p['id'], prefix=IMPLICIT_PREFIX) + # Shadow EPG provides and consumes implicit contract + self.apic_manager.set_contract_for_epg( + tenant, shadow_epg, contract, provider=False, + contract_owner=tenant, transaction=trs) + self.apic_manager.set_contract_for_epg( + tenant, shadow_epg, contract, provider=True, + contract_owner=tenant, 
transaction=trs) + + def _associate_service_filter(self, tenant, contract, filter_name, + entry_name, transaction=None, **attrs): + with self.apic_manager.apic.transaction(transaction) as trs: + filter_name = '%s-%s' % (str(self.apic_manager.app_profile_name), + filter_name) + self.apic_manager.create_tenant_filter( + filter_name, owner=tenant, entry=entry_name, + transaction=trs, **attrs) + self.apic_manager.manage_contract_subject_bi_filter( + contract, contract, filter_name, owner=tenant, + transaction=trs, rule_owner=tenant) + + def _delete_shadow_epg(self, context, l2p, transaction=None): + with self.apic_manager.apic.transaction(transaction) as trs: + tenant = self._tenant_by_sharing_policy(l2p) + shadow_epg = self.name_mapper.l2_policy( + context, l2p['id'], prefix=SHADOW_PREFIX) + self.apic_manager.delete_epg_for_network( + tenant, shadow_epg, transaction=trs) + + # Delete Service Contract + contract = self.name_mapper.l2_policy( + context, l2p['id'], prefix=SERVICE_PREFIX) + self.apic_manager.delete_contract( + contract, owner=tenant, transaction=trs) + + def _delete_implicit_contract(self, context, l2p, transaction=None): + with self.apic_manager.apic.transaction(transaction) as trs: + tenant = self._tenant_by_sharing_policy(l2p) + contract = self.name_mapper.l2_policy( + context, l2p['id'], prefix=IMPLICIT_PREFIX) + self.apic_manager.delete_contract( + contract, owner=tenant, transaction=trs) + + def _configure_epg_service_contract(self, context, ptg, l2p, epg_name, + transaction=None): + with self.apic_manager.apic.transaction(transaction) as trs: + contract_owner = self._tenant_by_sharing_policy(l2p) + tenant = self._tenant_by_sharing_policy(ptg) + contract = self.name_mapper.l2_policy( + context, l2p['id'], prefix=SERVICE_PREFIX) + self.apic_manager.set_contract_for_epg( + tenant, epg_name, contract, provider=False, + contract_owner=contract_owner, transaction=trs) + + def _configure_epg_implicit_contract(self, context, ptg, l2p, epg_name, + 
transaction=None): + with self.apic_manager.apic.transaction(transaction) as trs: + contract_owner = self._tenant_by_sharing_policy(l2p) + tenant = self._tenant_by_sharing_policy(ptg) + contract = self.name_mapper.l2_policy( + context, l2p['id'], prefix=IMPLICIT_PREFIX) + self.apic_manager.set_contract_for_epg( + tenant, epg_name, contract, provider=False, + contract_owner=contract_owner, transaction=trs) + self.apic_manager.set_contract_for_epg( + tenant, epg_name, contract, provider=True, + contract_owner=contract_owner, transaction=trs) + + def _get_redirect_action(self, context, policy_rule): + for action in context._plugin.get_policy_actions( + context._plugin_context, + filters={'id': policy_rule['policy_actions']}): + if action['action_type'] == g_const.GP_ACTION_REDIRECT: + return action + + def _validate_new_prs_redirect(self, context, prs): + if self._prss_redirect_rules(context._plugin_context.session, + [prs['id']]) > 1: + raise gpexc.MultipleRedirectActionsNotSupportedForPRS() + + def _prss_redirect_rules(self, session, prs_ids): + if len(prs_ids) == 0: + # No result will be found in this case + return 0 + query = (session.query(gpdb.gpdb.PolicyAction). + join(gpdb.gpdb.PolicyRuleActionAssociation). + join(gpdb.gpdb.PolicyRule). + join(gpdb.gpdb.PRSToPRAssociation). + filter( + gpdb.gpdb.PRSToPRAssociation.policy_rule_set_id.in_(prs_ids)). + filter(gpdb.gpdb.PolicyAction.action_type == + g_const.GP_ACTION_REDIRECT)) + return query.count() + + def _multiple_pr_redirect_action_number(self, session, pr_ids): + # Given a set of rules, gives the total number of redirect actions + # found + if len(pr_ids) == 0: + # No result will be found in this case + return 0 + return (session.query(gpdb.gpdb.PolicyAction). + join(gpdb.gpdb.PolicyRuleActionAssociation). + filter( + gpdb.gpdb.PolicyRuleActionAssociation.policy_rule_id.in_( + pr_ids)). 
+ filter(gpdb.gpdb.PolicyAction.action_type == + g_const.GP_ACTION_REDIRECT)).count() diff --git a/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/nova_client.py b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/nova_client.py new file mode 100644 index 000000000..0ccb195c6 --- /dev/null +++ b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/nova_client.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from keystoneclient import auth as ks_auth +from keystoneclient import session as ks_session +from neutron.notifiers import nova as n_nova +from novaclient import client as nclient +from novaclient import exceptions as nova_exceptions +from oslo_config import cfg +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + + +class NovaClient: + + def __init__(self): + + auth = ks_auth.load_from_conf_options(cfg.CONF, 'nova') + endpoint_override = None + + if not auth: + + if cfg.CONF.nova_admin_tenant_id: + endpoint_override = "%s/%s" % (cfg.CONF.nova_url, + cfg.CONF.nova_admin_tenant_id) + + auth = n_nova.DefaultAuthPlugin( + auth_url=cfg.CONF.nova_admin_auth_url, + username=cfg.CONF.nova_admin_username, + password=cfg.CONF.nova_admin_password, + tenant_id=cfg.CONF.nova_admin_tenant_id, + tenant_name=cfg.CONF.nova_admin_tenant_name, + endpoint_override=endpoint_override) + + session = ks_session.Session.load_from_conf_options( + cfg.CONF, 'nova', auth=auth) + novaclient_cls = 
nclient.get_client_class(n_nova.NOVA_API_VERSION) + + self.nclient = novaclient_cls( + session=session, + region_name=cfg.CONF.nova.region_name) + + def get_server(self, server_id): + try: + return self.nclient.servers.get(server_id) + except nova_exceptions.NotFound: + LOG.warning(_("Nova returned NotFound for server: %s"), + server_id) + except Exception as e: + LOG.exception(e) diff --git a/gbpservice/neutron/services/grouppolicy/group_policy_context.py b/gbpservice/neutron/services/grouppolicy/group_policy_context.py index 32cb7b249..d417020ed 100644 --- a/gbpservice/neutron/services/grouppolicy/group_policy_context.py +++ b/gbpservice/neutron/services/grouppolicy/group_policy_context.py @@ -95,6 +95,10 @@ class PolicyTargetGroupContext(GroupPolicyContext, self._plugin_context, self._policy_target_group['id'], subnet_id) self._policy_target_group['subnets'] = subnets + def add_subnets(self, subnet_ids): + for subnet_id in subnet_ids: + self.add_subnet(subnet_id) + class L2PolicyContext(GroupPolicyContext, api.L2PolicyContext): diff --git a/gbpservice/neutron/tests/unit/services/grouppolicy/test_apic_mapping.py b/gbpservice/neutron/tests/unit/services/grouppolicy/test_apic_mapping.py index b6cd60f78..a18116f61 100644 --- a/gbpservice/neutron/tests/unit/services/grouppolicy/test_apic_mapping.py +++ b/gbpservice/neutron/tests/unit/services/grouppolicy/test_apic_mapping.py @@ -18,6 +18,7 @@ import mock import netaddr import webob.exc +from neutron.agent import securitygroups_rpc as sg_cfg from neutron.common import rpc as n_rpc from neutron import context from neutron.db import api as db_api @@ -26,16 +27,18 @@ from neutron import manager from neutron.tests.unit.plugins.ml2.drivers.cisco.apic import ( base as mocked) from neutron.tests.unit.plugins.ml2 import test_plugin +from opflexagent import constants as ocst from oslo_config import cfg sys.modules["apicapi"] = mock.Mock() +from gbpservice.neutron.services.grouppolicy import ( + group_policy_context as 
p_context) from gbpservice.neutron.services.grouppolicy import config from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import ( apic_mapping as amap) from gbpservice.neutron.tests.unit.services.grouppolicy import ( - test_grouppolicy_plugin as test_gp_plugin) - + test_resource_mapping as test_rmd) APIC_L2_POLICY = 'l2_policy' APIC_L3_POLICY = 'l3_policy' @@ -45,9 +48,15 @@ APIC_POLICY_RULE = 'policy_rule' APIC_EXTERNAL_RID = '1.0.0.1' +AGENT_TYPE = ocst.AGENT_TYPE_OPFLEX_OVS +AGENT_CONF = {'alive': True, 'binary': 'somebinary', + 'topic': 'sometopic', 'agent_type': AGENT_TYPE, + 'configurations': {'opflex_networks': None, + 'bridge_mappings': {'physnet1': 'br-eth1'}}} -def echo(context, string): - return string + +def echo(context, string, prefix=''): + return prefix + string class MockCallRecorder(mock.Mock): @@ -65,23 +74,35 @@ class MockCallRecorder(mock.Mock): class ApicMappingTestCase( - test_gp_plugin.GroupPolicyPluginTestCase, + test_rmd.ResourceMappingTestCase, mocked.ControllerMixin, mocked.ConfigMixin): def setUp(self): - config.cfg.CONF.set_override('policy_drivers', - ['implicit_policy', 'apic'], - group='group_policy') + cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP') + config.cfg.CONF.set_override('enable_security_group', False, + group='SECURITYGROUP') n_rpc.create_connection = mock.Mock() amap.ApicMappingDriver.get_apic_manager = mock.Mock() self.set_up_mocks() ml2_opts = { - 'mechanism_drivers': ['openvswitch', 'apic_gbp'], + 'mechanism_drivers': ['apic_gbp'], + 'type_drivers': ['opflex'], + 'tenant_network_types': ['opflex'] } - for opt, val in ml2_opts.items(): - cfg.CONF.set_override(opt, val, 'ml2') + mock.patch('gbpservice.neutron.services.grouppolicy.drivers.cisco.' + 'apic.apic_mapping.ApicMappingDriver._setup_rpc').start() + host_agents = mock.patch('neutron.plugins.ml2.driver_context.' 
+ 'PortContext.host_agents').start() + host_agents.return_value = [AGENT_CONF] + nova_client = mock.patch( + 'gbpservice.neutron.services.grouppolicy.drivers.cisco.' + 'apic.nova_client.NovaClient.get_server').start() + vm = mock.Mock() + vm.name = 'someid' + nova_client.return_value = vm super(ApicMappingTestCase, self).setUp( - core_plugin=test_plugin.PLUGIN_NAME) + policy_drivers=['implicit_policy', 'apic'], + core_plugin=test_plugin.PLUGIN_NAME, ml2_options=ml2_opts) engine = db_api.get_engine() model_base.BASEV2.metadata.create_all(engine) plugin = manager.NeutronManager.get_plugin() @@ -103,6 +124,7 @@ class ApicMappingTestCase( self.driver.apic_manager = mock.Mock(name_mapper=mock.Mock(), ext_net_dict={}) self.driver.apic_manager.apic.transaction = self.fake_transaction + self.driver.notifier = mock.Mock() amap.apic_manager.TENANT_COMMON = 'common' self.common_tenant = amap.apic_manager.TENANT_COMMON @@ -127,34 +149,40 @@ class ApicMappingTestCase( msg='Call not found, expected:\n%s\nobserved:' '\n%s' % (str(call), str(observed))) observed.remove(call) - self.assertFalse(len(observed)) + self.assertFalse( + len(observed), + msg='There are more calls than expected: %s' % str(observed)) + + def _create_simple_policy_rule(self, direction='bi', protocol='tcp', + port_range=80, shared=False, + action_type='allow', action_value=None): + cls = self.create_policy_classifier( + direction=direction, protocol=protocol, + port_range=port_range, shared=shared)['policy_classifier'] + + action = self.create_policy_action( + action_type=action_type, shared=shared, + action_value=action_value)['policy_action'] + return self.create_policy_rule( + policy_classifier_id=cls['id'], policy_actions=[action['id']], + shared=shared)['policy_rule'] + + def _bind_port_to_host(self, port_id, host): + plugin = manager.NeutronManager.get_plugin() + ctx = context.get_admin_context() + agent = {'host': host} + agent.update(AGENT_CONF) + plugin.create_or_update_agent(ctx, agent) + data = 
{'port': {'binding:host_id': host, 'device_owner': 'compute:', + 'device_id': 'someid'}} + # Create EP with bound port + req = self.new_update_request('ports', data, port_id, + self.fmt) + return self.deserialize(self.fmt, req.get_response(self.api)) class TestPolicyTarget(ApicMappingTestCase): - def test_policy_target_created_on_apic(self): - ptg = self.create_policy_target_group()['policy_target_group'] - subnet = self._get_object('subnets', ptg['subnets'][0], self.api) - with self.port(subnet=subnet) as port: - self._bind_port_to_host(port['port']['id'], 'h1') - self.create_policy_target(policy_target_group_id=ptg['id'], - port_id=port['port']['id']) - mgr = self.driver.apic_manager - self.assertEqual(mgr.ensure_path_created_for_port.call_count, 1) - - def test_policy_target_port_update_on_apic_none_to_host(self): - ptg = self.create_policy_target_group( - name="ptg1")['policy_target_group'] - pt = self.create_policy_target( - policy_target_group_id=ptg['id'])['policy_target'] - port = self._get_object('ports', pt['port_id'], self.api) - port_up = self._bind_port_to_host(port['port']['id'], 'h1') - - self.driver.process_port_changed(context.get_admin_context(), - port['port'], port_up['port']) - mgr = self.driver.apic_manager - self.assertEqual(mgr.ensure_path_created_for_port.call_count, 1) - def test_policy_target_port_deleted_on_apic(self): ptg = self.create_policy_target_group()['policy_target_group'] subnet = self._get_object('subnets', ptg['subnets'][0], self.api) @@ -165,8 +193,7 @@ class TestPolicyTarget(ApicMappingTestCase): self.new_delete_request( 'policy_targets', pt['policy_target']['id'], self.fmt).get_response(self.ext_api) - mgr = self.driver.apic_manager - self.assertEqual(mgr.ensure_path_deleted_for_port.call_count, 1) + self.assertTrue(self.driver.notifier.port_update.called) def test_policy_target_delete_no_port(self): ptg = self.create_policy_target_group()['policy_target_group'] @@ -179,46 +206,92 @@ class 
TestPolicyTarget(ApicMappingTestCase): self.fmt).get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) self.delete_policy_target(pt['policy_target']['id'], - expected_res_status=204) + expected_res_status=404) - def test_policy_target_port_deleted_on_apic_host_to_host(self): - ptg = self.create_policy_target_group()['policy_target_group'] - subnet = self._get_object('subnets', ptg['subnets'][0], self.api) - with self.port(subnet=subnet) as port: - # Create EP with bound port - port = self._bind_port_to_host(port['port']['id'], 'h1') - self.create_policy_target(policy_target_group_id=ptg['id'], - port_id=port['port']['id']) - - # Change port binding and notify driver - port_up = self._bind_port_to_host(port['port']['id'], 'h2') - self.driver.process_port_changed(context.get_admin_context(), - port['port'], port_up['port']) - - mgr = self.driver.apic_manager - # Path created 2 times - self.assertEqual(mgr.ensure_path_created_for_port.call_count, - 2) - # Path deleted 1 time - self.assertEqual(mgr.ensure_path_deleted_for_port.call_count, 1) - - def test_policy_target_port_not_deleted(self): - # Create 2 EP same PTG same host bound + def test_delete_policy_target_notification_no_apic_network(self): ptg = self.create_policy_target_group( name="ptg1")['policy_target_group'] pt1 = self.create_policy_target( policy_target_group_id=ptg['id'])['policy_target'] self._bind_port_to_host(pt1['port_id'], 'h1') - pt2 = self.create_policy_target( - policy_target_group_id=ptg['id'])['policy_target'] - self._bind_port_to_host(pt2['port_id'], 'h1') + # Implicit port will be deleted with the PT + self.delete_policy_target(pt1['id'], expected_res_status=204) + # No notification needed + self.assertFalse(self.driver.notifier.port_update.called) + self.driver.notifier.port_update.reset_mock() + subnet = self._get_object('subnets', ptg['subnets'][0], self.api) + with self.port(subnet=subnet) as port: + # Create EP with bound port + port = 
self._bind_port_to_host(port['port']['id'], 'h1') + pt1 = self.create_policy_target( + policy_target_group_id=ptg['id'], port_id=port['port']['id']) + # Explicit port won't be deleted with PT + self.delete_policy_target(pt1['policy_target']['id'], + expected_res_status=204) + # Issue notification for the agent + self.assertTrue(self.driver.notifier.port_update.called) - # Delete EP1 - self.new_delete_request('policy_targets', pt1['id'], - self.fmt).get_response(self.ext_api) - # APIC path not deleted - mgr = self.driver.apic_manager - self.assertEqual(mgr.ensure_path_deleted_for_port.call_count, 0) + def test_get_gbp_details(self): + ptg = self.create_policy_target_group( + name="ptg1")['policy_target_group'] + pt1 = self.create_policy_target( + policy_target_group_id=ptg['id'])['policy_target'] + self._bind_port_to_host(pt1['port_id'], 'h1') + mapping = self.driver.get_gbp_details(context.get_admin_context(), + device='tap%s' % pt1['port_id'], host='h1') + self.assertEqual(pt1['port_id'], mapping['port_id']) + self.assertEqual(ptg['id'], mapping['endpoint_group_name']) + self.assertEqual('someid', mapping['vm-name']) + + def test_get_gbp_details_shadow(self): + l2p = self.create_l2_policy()['l2_policy'] + network = self._get_object('networks', l2p['network_id'], self.api) + with self.subnet(network=network) as sub: + with self.port(subnet=sub) as port: + self._bind_port_to_host(port['port']['id'], 'h1') + mapping = self.driver.get_gbp_details( + context.get_admin_context(), + device='tap%s' % port['port']['id'], host='h1') + self.assertEqual(port['port']['id'], mapping['port_id']) + self.assertEqual(amap.SHADOW_PREFIX + l2p['id'], + mapping['endpoint_group_name']) + + def test_explicit_port(self): + with self.network() as net: + with self.subnet(network=net) as sub: + with self.port(subnet=sub) as port: + self._bind_port_to_host(port['port']['id'], 'h1') + l2p = self.create_l2_policy( + network_id=net['network']['id'])['l2_policy'] + ptg = 
self.create_policy_target_group( + l2_policy_id=l2p['id'])['policy_target_group'] + self.create_policy_target( + port_id=port['port']['id'], + policy_target_group_id=ptg['id']) + self.assertTrue(self.driver.notifier.port_update.called) + + def test_port_notified_on_changed_ptg(self): + ptg = self.create_policy_target_group()['policy_target_group'] + ptg2 = self.create_policy_target_group( + l2_policy_id=ptg['l2_policy_id'])['policy_target_group'] + pt = self.create_policy_target( + policy_target_group_id=ptg['id'])['policy_target'] + self._bind_port_to_host(pt['port_id'], 'h1') + + self.driver.notifier.port_update.reset_mock() + self.update_policy_target(pt['id'], policy_target_group_id=ptg2['id']) + self.assertTrue(self.driver.notifier.port_update.called) + + def test_update_ptg_failed(self): + ptg = self.create_policy_target_group()['policy_target_group'] + ptg2 = self.create_policy_target_group()['policy_target_group'] + pt = self.create_policy_target( + policy_target_group_id=ptg['id'])['policy_target'] + + res = self.update_policy_target( + pt['id'], policy_target_group_id=ptg2['id'], + expected_res_status=400) + self.assertEqual('InvalidPortForPTG', res['NeutronError']['type']) class TestPolicyTargetGroup(ApicMappingTestCase): @@ -228,9 +301,14 @@ class TestPolicyTargetGroup(ApicMappingTestCase): name="ptg1", shared=shared)['policy_target_group'] tenant = self.common_tenant if shared else ptg['tenant_id'] mgr = self.driver.apic_manager - mgr.ensure_epg_created.assert_called_once_with( - tenant, ptg['id'], bd_name=ptg['l2_policy_id'], - bd_owner=tenant) + expected_calls = [ + mock.call(tenant, ptg['id'], bd_name=ptg['l2_policy_id'], + bd_owner=tenant), + mock.call(tenant, amap.SHADOW_PREFIX + ptg['l2_policy_id'], + bd_name=ptg['l2_policy_id'], bd_owner=tenant, + transaction=mock.ANY)] + self._check_call_list( + expected_calls, mgr.ensure_epg_created.call_args_list) def test_policy_target_group_created_on_apic(self): 
self._test_policy_target_group_created_on_apic() @@ -241,22 +319,44 @@ class TestPolicyTargetGroup(ApicMappingTestCase): def _test_ptg_policy_rule_set_created(self, provider=True, shared=False): cntr = self.create_policy_rule_set(name='c', shared=shared)['policy_rule_set'] - + l2p = self.create_l2_policy()['l2_policy'] + mgr = self.driver.apic_manager + mgr.set_contract_for_epg.reset_mock() if provider: ptg = self.create_policy_target_group( + l2_policy_id=l2p['id'], provided_policy_rule_sets={cntr['id']: 'scope'})[ 'policy_target_group'] else: ptg = self.create_policy_target_group( + l2_policy_id=l2p['id'], consumed_policy_rule_sets={cntr['id']: 'scope'})[ 'policy_target_group'] # Verify that the apic call is issued ct_owner = self.common_tenant if shared else cntr['tenant_id'] - mgr = self.driver.apic_manager - mgr.set_contract_for_epg.assert_called_with( - ptg['tenant_id'], ptg['id'], cntr['id'], transaction='transaction', - contract_owner=ct_owner, provider=provider) + expected_calls = [ + mock.call( + ptg['tenant_id'], ptg['id'], cntr['id'], + transaction=mock.ANY, contract_owner=ct_owner, + provider=provider), + mock.call( + ptg['tenant_id'], ptg['id'], + amap.SERVICE_PREFIX + ptg['l2_policy_id'], + transaction=mock.ANY, contract_owner=ptg['tenant_id'], + provider=False), + mock.call( + ptg['tenant_id'], ptg['id'], + amap.IMPLICIT_PREFIX + ptg['l2_policy_id'], + transaction=mock.ANY, contract_owner=ptg['tenant_id'], + provider=True), + mock.call( + ptg['tenant_id'], ptg['id'], + amap.IMPLICIT_PREFIX + ptg['l2_policy_id'], + transaction=mock.ANY, contract_owner=ptg['tenant_id'], + provider=False)] + self._check_call_list(expected_calls, + mgr.set_contract_for_epg.call_args_list) def _test_ptg_policy_rule_set_updated(self, provider=True, shared=False): p_or_c = {True: 'provided_policy_rule_sets', @@ -284,12 +384,12 @@ class TestPolicyTargetGroup(ApicMappingTestCase): ct_owner = self.common_tenant if shared else cntr['tenant_id'] 
mgr.set_contract_for_epg.assert_called_with( ptg['tenant_id'], ptg['id'], new_cntr['id'], - contract_owner=ct_owner, transaction='transaction', + contract_owner=ct_owner, transaction=mock.ANY, provider=provider) mgr.unset_contract_for_epg.assert_called_with( ptg['tenant_id'], ptg['id'], cntr['id'], contract_owner=ct_owner, - transaction='transaction', provider=provider) + transaction=mock.ANY, provider=provider) def test_ptg_policy_rule_set_provider_created(self): self._test_ptg_policy_rule_set_created() @@ -316,15 +416,20 @@ class TestPolicyTargetGroup(ApicMappingTestCase): self._test_ptg_policy_rule_set_updated(False, shared=True) def _test_policy_target_group_deleted_on_apic(self, shared=False): - ptg = self.create_policy_target_group(name="ptg1", - shared=shared)['policy_target_group'] + ptg = self.create_policy_target_group( + name="ptg1", shared=shared)['policy_target_group'] req = self.new_delete_request('policy_target_groups', ptg['id'], self.fmt) req.get_response(self.ext_api) mgr = self.driver.apic_manager tenant = self.common_tenant if shared else ptg['tenant_id'] - mgr.delete_epg_for_network.assert_called_once_with( - tenant, ptg['id']) + + expected_calls = [ + mock.call(tenant, ptg['id']), + mock.call(tenant, amap.SHADOW_PREFIX + ptg['l2_policy_id'], + transaction=mock.ANY)] + self._check_call_list(expected_calls, + mgr.delete_epg_for_network.call_args_list) def test_policy_target_group_deleted_on_apic(self): self._test_policy_target_group_deleted_on_apic() @@ -340,7 +445,7 @@ class TestPolicyTargetGroup(ApicMappingTestCase): tenant = self.common_tenant if shared else ptg['tenant_id'] mgr.ensure_subnet_created_on_apic.assert_called_once_with( tenant, ptg['l2_policy_id'], '10.0.0.1/24', - transaction='transaction') + transaction=mock.ANY) def test_policy_target_group_subnet_created_on_apic(self): self._test_policy_target_group_subnet_created_on_apic() @@ -364,7 +469,7 @@ class TestPolicyTargetGroup(ApicMappingTestCase): tenant = self.common_tenant if 
shared else ptg['tenant_id'] mgr.ensure_subnet_created_on_apic.assert_called_with( tenant, ptg['l2_policy_id'], '10.0.1.1/24', - transaction='transaction') + transaction=mock.ANY) def test_policy_target_group_subnet_added(self): self._test_policy_target_group_subnet_added() @@ -385,10 +490,10 @@ class TestPolicyTargetGroup(ApicMappingTestCase): tenant = self.common_tenant if shared else ptg['tenant_id'] mgr.ensure_subnet_created_on_apic.assert_called_once_with( tenant, ptg['l2_policy_id'], '10.0.0.254/24', - transaction='transaction') + transaction=mock.ANY) mgr.ensure_subnet_deleted_on_apic.assert_called_with( tenant, ptg['l2_policy_id'], '10.0.0.1/24', - transaction='transaction') + transaction=mock.ANY) def test_process_subnet_update(self): self._test_process_subnet_update() @@ -396,16 +501,66 @@ class TestPolicyTargetGroup(ApicMappingTestCase): def test_process_subnet_update_shared(self): self._test_process_subnet_update(shared=True) + def test_multiple_ptg_per_l2p(self): + l2p = self.create_l2_policy()['l2_policy'] + # Create first PTG + ptg1 = self.create_policy_target_group( + l2_policy_id=l2p['id'])['policy_target_group'] + ptg2 = self.create_policy_target_group( + l2_policy_id=l2p['id'])['policy_target_group'] + self.assertEqual(ptg1['subnets'], ptg2['subnets']) + + def test_force_add_subnet(self): + l2p = self.create_l2_policy()['l2_policy'] + # Create first PTG + ptg1 = self.create_policy_target_group( + l2_policy_id=l2p['id'])['policy_target_group'] + ptg2 = self.create_policy_target_group( + l2_policy_id=l2p['id'])['policy_target_group'] + ctx = p_context.PolicyTargetGroupContext( + self.driver.gbp_plugin, context.get_admin_context(), ptg2) + # Emulate force add + self.driver._use_implicit_subnet(ctx, force_add=True) + # There now a new subnet, and it's added to both the PTGs + self.assertEqual(2, len(ctx.current['subnets'])) + ptg1 = self.show_policy_target_group(ptg1['id'])['policy_target_group'] + self.assertEqual(2, len(ptg1['subnets'])) + ptg2 = 
self.show_policy_target_group(ptg2['id'])['policy_target_group'] + self.assertEqual(2, len(ptg2['subnets'])) + self.assertEqual(set(ptg1['subnets']), set(ptg2['subnets'])) + self.assertNotEqual(ptg2['subnets'][0], ptg2['subnets'][1]) + + def test_subnets_unique_per_l3p(self): + l3p = self.create_l3_policy(shared=True, tenant_id='admin', + is_admin_context=True)['l3_policy'] + l2p1 = self.create_l2_policy( + tenant_id='hr', l3_policy_id=l3p['id'])['l2_policy'] + l2p2 = self.create_l2_policy( + tenant_id='eng', l3_policy_id=l3p['id'])['l2_policy'] + ptg1 = self.create_policy_target_group( + tenant_id='hr', l2_policy_id=l2p1['id'])['policy_target_group'] + ptg2 = self.create_policy_target_group( + tenant_id='eng', l2_policy_id=l2p2['id'])['policy_target_group'] + sub_ptg_1 = set(self._get_object('subnets', + x, self.api)['subnet']['cidr'] + for x in ptg1['subnets']) + sub_ptg_2 = set(self._get_object('subnets', + x, self.api)['subnet']['cidr'] + for x in ptg2['subnets']) + self.assertNotEqual(sub_ptg_1, sub_ptg_2) + self.assertFalse(sub_ptg_1 & sub_ptg_2) + def _create_explicit_subnet_ptg(self, cidr, shared=False): l2p = self.create_l2_policy(name="l2p", shared=shared) l2p_id = l2p['l2_policy']['id'] network_id = l2p['l2_policy']['network_id'] network = self._get_object('networks', network_id, self.api) - with self.subnet(network=network, cidr=cidr) as subnet: - subnet_id = subnet['subnet']['id'] + with self.subnet(network=network, cidr=cidr): + # The subnet creation in the proper network causes the subnet ID + # to be added to the PTG return self.create_policy_target_group( name="ptg1", l2_policy_id=l2p_id, - subnets=[subnet_id], shared=shared)['policy_target_group'] + shared=shared)['policy_target_group'] class TestL2Policy(ApicMappingTestCase): @@ -416,7 +571,11 @@ class TestL2Policy(ApicMappingTestCase): tenant = self.common_tenant if shared else l2p['tenant_id'] mgr = self.driver.apic_manager mgr.ensure_bd_created_on_apic.assert_called_once_with( - tenant, 
l2p['id'], ctx_owner=tenant, ctx_name=l2p['l3_policy_id']) + tenant, l2p['id'], ctx_owner=tenant, ctx_name=l2p['l3_policy_id'], + transaction=mock.ANY) + mgr.ensure_epg_created.assert_called_once_with( + tenant, amap.SHADOW_PREFIX + l2p['id'], bd_owner=tenant, + bd_name=l2p['id'], transaction=mock.ANY) def test_l2_policy_created_on_apic(self): self._test_l2_policy_created_on_apic() @@ -431,7 +590,17 @@ class TestL2Policy(ApicMappingTestCase): tenant = self.common_tenant if shared else l2p['tenant_id'] mgr = self.driver.apic_manager mgr.delete_bd_on_apic.assert_called_once_with( - tenant, l2p['id']) + tenant, l2p['id'], transaction=mock.ANY) + mgr.delete_epg_for_network.assert_called_once_with( + tenant, amap.SHADOW_PREFIX + l2p['id'], + transaction=mock.ANY) + expected_calls = [ + mock.call(amap.IMPLICIT_PREFIX + l2p['id'], owner=tenant, + transaction=mock.ANY), + mock.call(amap.SERVICE_PREFIX + l2p['id'], owner=tenant, + transaction=mock.ANY)] + self._check_call_list(expected_calls, + mgr.delete_contract.call_args_list) def test_l2_policy_deleted_on_apic(self): self._test_l2_policy_deleted_on_apic() @@ -439,6 +608,21 @@ class TestL2Policy(ApicMappingTestCase): def test_l2_policy_deleted_on_apic_shared(self): self._test_l2_policy_deleted_on_apic(shared=True) + def test_pre_existing_subnets_added(self): + with self.network() as net: + with self.subnet(network=net) as sub: + sub = sub['subnet'] + l2p = self.create_l2_policy( + network_id=net['network']['id'])['l2_policy'] + mgr = self.driver.apic_manager + mgr.ensure_subnet_created_on_apic.assert_called_with( + l2p['tenant_id'], l2p['id'], + sub['gateway_ip'] + '/' + sub['cidr'].split('/')[1], + transaction=mock.ANY) + ptg = self.create_policy_target_group( + l2_policy_id=l2p['id'])['policy_target_group'] + self.assertEqual(ptg['subnets'], [sub['id']]) + class TestL3Policy(ApicMappingTestCase): @@ -541,7 +725,7 @@ class TestL3Policy(ApicMappingTestCase): transaction=mock.ANY) 
mgr.ensure_logical_node_profile_created.assert_called_once_with( es['id'], mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE, - mocked.APIC_EXT_PORT, mocked.APIC_EXT_ENCAP, '192.168.0.3', + mocked.APIC_EXT_PORT, mocked.APIC_EXT_ENCAP, '192.168.0.3/24', owner=owner, router_id=APIC_EXTERNAL_RID, transaction=mock.ANY) @@ -596,7 +780,7 @@ class TestL3Policy(ApicMappingTestCase): transaction=mock.ANY) mgr.ensure_logical_node_profile_created.assert_called_once_with( es['id'], mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE, - mocked.APIC_EXT_PORT, mocked.APIC_EXT_ENCAP, '192.168.0.3', + mocked.APIC_EXT_PORT, mocked.APIC_EXT_ENCAP, '192.168.0.3/24', owner=owner, router_id=APIC_EXTERNAL_RID, transaction=mock.ANY) @@ -718,7 +902,7 @@ class TestL3Policy(ApicMappingTestCase): transaction=mock.ANY) mgr.ensure_logical_node_profile_created.assert_called_once_with( es2['id'], mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE, - mocked.APIC_EXT_PORT, mocked.APIC_EXT_ENCAP, '192.168.1.3', + mocked.APIC_EXT_PORT, mocked.APIC_EXT_ENCAP, '192.168.1.3/24', owner=owner, router_id=APIC_EXTERNAL_RID, transaction=mock.ANY) self.assertFalse(mgr.ensure_static_route_created.called) @@ -806,7 +990,7 @@ class TestPolicyRuleSet(ApicMappingTestCase): tenant = self.common_tenant if shared else ct['tenant_id'] mgr = self.driver.apic_manager mgr.create_contract.assert_called_once_with( - ct['id'], owner=tenant, transaction='transaction') + ct['id'], owner=tenant, transaction=mock.ANY) def test_policy_rule_set_created_on_apic(self): self._test_policy_rule_set_created_on_apic() @@ -825,26 +1009,49 @@ class TestPolicyRuleSet(ApicMappingTestCase): rule_owner = self.common_tenant if shared else rules[0]['tenant_id'] # Verify that the in-out rules are correctly enforced on the APIC mgr = self.driver.apic_manager - mgr.manage_contract_subject_in_filter.assert_called_once_with( - ctr['id'], ctr['id'], rules[in_d]['id'], owner=ctr['tenant_id'], - transaction='transaction', unset=False, - rule_owner=rule_owner) - 
mgr.manage_contract_subject_out_filter.assert_called_once_with( - ctr['id'], ctr['id'], rules[out]['id'], owner=ctr['tenant_id'], - transaction='transaction', unset=False, - rule_owner=rule_owner) + expected_calls = [ + mock.call(ctr['id'], ctr['id'], rules[in_d]['id'], + owner=ctr['tenant_id'], transaction=mock.ANY, + unset=False, rule_owner=rule_owner), + mock.call(ctr['id'], ctr['id'], + amap.REVERSE_PREFIX + rules[out]['id'], + owner=ctr['tenant_id'], transaction=mock.ANY, + unset=False, rule_owner=rule_owner)] + self._check_call_list( + expected_calls, + mgr.manage_contract_subject_in_filter.call_args_list) + + expected_calls = [ + mock.call(ctr['id'], ctr['id'], rules[out]['id'], + owner=ctr['tenant_id'], transaction=mock.ANY, + unset=False, rule_owner=rule_owner), + mock.call(ctr['id'], ctr['id'], + amap.REVERSE_PREFIX + rules[in_d]['id'], + owner=ctr['tenant_id'], transaction=mock.ANY, + unset=False, rule_owner=rule_owner)] + self._check_call_list( + expected_calls, + mgr.manage_contract_subject_out_filter.call_args_list) # Create policy_rule_set with BI rule ctr = self.create_policy_rule_set( name="ctr", policy_rules=[rules[bi]['id']])['policy_rule_set'] - mgr.manage_contract_subject_in_filter.assert_called_with( + mgr.manage_contract_subject_in_filter.call_happened_with( ctr['id'], ctr['id'], rules[bi]['id'], owner=ctr['tenant_id'], - transaction='transaction', unset=False, + transaction=mock.ANY, unset=False, rule_owner=rule_owner) - mgr.manage_contract_subject_out_filter.assert_called_with( + mgr.manage_contract_subject_out_filter.call_happened_with( ctr['id'], ctr['id'], rules[bi]['id'], owner=ctr['tenant_id'], - transaction='transaction', unset=False, + transaction=mock.ANY, unset=False, + rule_owner=rule_owner) + mgr.manage_contract_subject_in_filter.call_happened_with( + ctr['id'], ctr['id'], amap.REVERSE_PREFIX + rules[bi]['id'], + owner=ctr['tenant_id'], transaction=mock.ANY, unset=False, + rule_owner=rule_owner) + 
mgr.manage_contract_subject_out_filter.call_happened_with( + ctr['id'], ctr['id'], amap.REVERSE_PREFIX + rules[bi]['id'], + owner=ctr['tenant_id'], transaction=mock.ANY, unset=False, rule_owner=rule_owner) def test_policy_rule_set_created_with_rules(self): @@ -950,13 +1157,25 @@ class TestPolicyRuleSet(ApicMappingTestCase): class TestPolicyRule(ApicMappingTestCase): def _test_policy_rule_created_on_apic(self, shared=False): - pr = self._create_simple_policy_rule('in', 'udp', 88, shared=shared) + pr = self._create_simple_policy_rule('in', 'tcp', 88, shared=shared) tenant = self.common_tenant if shared else pr['tenant_id'] mgr = self.driver.apic_manager - mgr.create_tenant_filter.assert_called_once_with( - pr['id'], owner=tenant, etherT='ip', prot='udp', - dToPort=88, dFromPort=88) + expected_calls = [ + mock.call(pr['id'], owner=tenant, etherT='ip', prot='tcp', + dToPort=88, dFromPort=88, transaction=mock.ANY), + mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant, + etherT='ip', prot='tcp', sToPort=88, sFromPort=88, + tcpRules='est', transaction=mock.ANY)] + self._check_call_list( + expected_calls, mgr.create_tenant_filter.call_args_list) + mgr.reset_mock() + pr = self._create_simple_policy_rule('bi', None, None, shared=shared) + expected_calls = [ + mock.call(pr['id'], owner=tenant, etherT='unspecified', + transaction=mock.ANY)] + self._check_call_list( + expected_calls, mgr.create_tenant_filter.call_args_list) def test_policy_rule_created_on_apic(self): self._test_policy_rule_created_on_apic() @@ -981,8 +1200,12 @@ class TestPolicyRule(ApicMappingTestCase): tenant = self.common_tenant if shared else pr['tenant_id'] mgr = self.driver.apic_manager - mgr.delete_tenant_filter.assert_called_once_with( - pr['id'], owner=tenant) + expected_calls = [ + mock.call(pr['id'], owner=tenant, transaction=mock.ANY), + mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant, + transaction=mock.ANY)] + self._check_call_list( + expected_calls, 
mgr.delete_tenant_filter.call_args_list) def test_policy_rule_deleted_on_apic(self): self._test_policy_rule_deleted_on_apic() @@ -990,17 +1213,137 @@ class TestPolicyRule(ApicMappingTestCase): def test_policy_rule_deleted_on_apic_shared(self): self._test_policy_rule_deleted_on_apic(shared=True) - def _create_simple_policy_rule(self, direction='bi', protocol='tcp', - port_range=80, shared=False): - cls = self.create_policy_classifier( - direction=direction, protocol=protocol, - port_range=port_range, shared=shared)['policy_classifier'] + def test_policy_classifier_updated(self): + pa = self.create_policy_action( + action_type='allow', is_admin_context=True, + tenant_id='admin', shared=True)['policy_action'] + pc = self.create_policy_classifier( + direction='in', protocol='udp', port_range=80, + shared=True, is_admin_context=True, + tenant_id='admin')['policy_classifier'] + pr1 = self.create_policy_rule( + policy_classifier_id=pc['id'], policy_actions=[pa['id']], + shared=True, is_admin_context=True, + tenant_id='admin')['policy_rule'] + pr2 = self.create_policy_rule(policy_classifier_id=pc['id'], + policy_actions=[pa['id']])['policy_rule'] + prs1 = self.create_policy_rule_set( + policy_rules=[pr1['id']])['policy_rule_set'] + prs2 = self.create_policy_rule_set( + policy_rules=[pr2['id'], pr1['id']])['policy_rule_set'] - action = self.create_policy_action( - action_type='allow', shared=shared)['policy_action'] - return self.create_policy_rule( - policy_classifier_id=cls['id'], policy_actions=[action['id']], - shared=shared)['policy_rule'] + mgr = self.driver.apic_manager + mgr.reset_mock() + + # Remove Classifier port, should just delete and create the filter + self.update_policy_classifier(pc['id'], port_range=None, + is_admin_context=True) + expected_calls = [ + mock.call(pr1['id'], owner='common', etherT='ip', prot='udp', + transaction=mock.ANY), + mock.call(pr2['id'], owner='test-tenant', etherT='ip', prot='udp', + transaction=mock.ANY)] + self._check_call_list( + 
expected_calls, mgr.create_tenant_filter.call_args_list) + expected_calls = [ + mock.call(pr1['id'], owner='common', transaction=mock.ANY), + mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY), + mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common', + transaction=mock.ANY), + mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant', + transaction=mock.ANY)] + self._check_call_list( + expected_calls, mgr.delete_tenant_filter.call_args_list) + self.assertFalse(mgr.manage_contract_subject_in_filter.called) + self.assertFalse(mgr.manage_contract_subject_out_filter.called) + mgr.reset_mock() + + # Change Classifier protocol, to not revertible + self.update_policy_classifier(pc['id'], protocol='icmp', + is_admin_context=True) + expected_calls = [ + mock.call(pr1['id'], owner='common', etherT='ip', prot='icmp', + transaction=mock.ANY), + mock.call(pr2['id'], owner='test-tenant', etherT='ip', prot='icmp', + transaction=mock.ANY)] + self._check_call_list( + expected_calls, mgr.create_tenant_filter.call_args_list) + expected_calls = [ + mock.call(pr1['id'], owner='common', transaction=mock.ANY), + mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY), + mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common', + transaction=mock.ANY), + mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant', + transaction=mock.ANY)] + self._check_call_list( + expected_calls, mgr.delete_tenant_filter.call_args_list) + + self.assertFalse(mgr.manage_contract_subject_in_filter.called) + self.assertFalse(mgr.manage_contract_subject_out_filter.called) + mgr.reset_mock() + + # Change Classifier protocol to revertible + self.update_policy_classifier(pc['id'], protocol='tcp', + is_admin_context=True) + expected_calls = [ + mock.call(pr1['id'], owner='common', transaction=mock.ANY), + mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY), + mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common', + transaction=mock.ANY), + 
mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant', + transaction=mock.ANY)] + self._check_call_list( + expected_calls, mgr.delete_tenant_filter.call_args_list) + expected_calls = [ + mock.call(pr1['id'], owner='common', etherT='ip', prot='tcp', + transaction=mock.ANY), + mock.call(pr2['id'], owner='test-tenant', etherT='ip', prot='tcp', + transaction=mock.ANY), + mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common', + etherT='ip', prot='tcp', tcpRules='est', + transaction=mock.ANY), + mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant', + etherT='ip', prot='tcp', tcpRules='est', + transaction=mock.ANY)] + self._check_call_list( + expected_calls, mgr.create_tenant_filter.call_args_list) + + expected_calls = [ + # Unset PR1 and PR2 IN + mock.call(prs1['id'], prs1['id'], pr1['id'], owner='test-tenant', + transaction=mock.ANY, unset=True, rule_owner='common'), + mock.call(prs2['id'], prs2['id'], pr1['id'], owner='test-tenant', + transaction=mock.ANY, unset=True, rule_owner='common'), + mock.call(prs2['id'], prs2['id'], pr2['id'], owner='test-tenant', + transaction=mock.ANY, unset=True, + rule_owner='test-tenant'), + # SET PR1 and PR2 IN + mock.call(prs1['id'], prs1['id'], pr1['id'], owner='test-tenant', + transaction=mock.ANY, unset=False, rule_owner='common'), + mock.call(prs2['id'], prs2['id'], pr1['id'], owner='test-tenant', + transaction=mock.ANY, unset=False, rule_owner='common'), + mock.call(prs2['id'], prs2['id'], pr2['id'], owner='test-tenant', + transaction=mock.ANY, unset=False, + rule_owner='test-tenant') + ] + self._check_call_list( + expected_calls, + mgr.manage_contract_subject_in_filter.call_args_list) + # SET Reverse PR1 and PR2 OUT + expected_calls = [ + mock.call(prs1['id'], prs1['id'], amap.REVERSE_PREFIX + pr1['id'], + owner='test-tenant', transaction=mock.ANY, unset=False, + rule_owner='common'), + mock.call(prs2['id'], prs2['id'], amap.REVERSE_PREFIX + pr1['id'], + owner='test-tenant', transaction=mock.ANY, 
unset=False, + rule_owner='common'), + mock.call(prs2['id'], prs2['id'], amap.REVERSE_PREFIX + pr2['id'], + owner='test-tenant', transaction=mock.ANY, unset=False, + rule_owner='test-tenant') + ] + self._check_call_list( + expected_calls, + mgr.manage_contract_subject_out_filter.call_args_list) class TestExternalSegment(ApicMappingTestCase): diff --git a/gbpservice/neutron/tests/unit/services/grouppolicy/test_grouppolicy_plugin.py b/gbpservice/neutron/tests/unit/services/grouppolicy/test_grouppolicy_plugin.py index acea7eac9..105329a78 100644 --- a/gbpservice/neutron/tests/unit/services/grouppolicy/test_grouppolicy_plugin.py +++ b/gbpservice/neutron/tests/unit/services/grouppolicy/test_grouppolicy_plugin.py @@ -49,12 +49,10 @@ class FakeDriver(object): class GroupPolicyPluginTestCase(tgpmdb.GroupPolicyMappingDbTestCase): - def setUp(self, core_plugin=None, gp_plugin=None): + def setUp(self, core_plugin=None, gp_plugin=None, ml2_options=None): if not gp_plugin: gp_plugin = GP_PLUGIN_KLASS - ml2_opts = { - 'mechanism_drivers': ['openvswitch'], - } + ml2_opts = ml2_options or {'mechanism_drivers': ['openvswitch']} for opt, val in ml2_opts.items(): cfg.CONF.set_override(opt, val, 'ml2') core_plugin = core_plugin or test_plugin.PLUGIN_NAME diff --git a/gbpservice/neutron/tests/unit/services/grouppolicy/test_resource_mapping.py b/gbpservice/neutron/tests/unit/services/grouppolicy/test_resource_mapping.py index b46342aa2..66d348641 100644 --- a/gbpservice/neutron/tests/unit/services/grouppolicy/test_resource_mapping.py +++ b/gbpservice/neutron/tests/unit/services/grouppolicy/test_resource_mapping.py @@ -61,7 +61,8 @@ CORE_PLUGIN = ('gbpservice.neutron.tests.unit.services.grouppolicy.' 
class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase): - def setUp(self, policy_drivers=None): + def setUp(self, policy_drivers=None, + core_plugin=n_test_plugin.PLUGIN_NAME, ml2_options=None): policy_drivers = policy_drivers or ['implicit_policy', 'resource_mapping'] config.cfg.CONF.set_override('policy_drivers', @@ -71,8 +72,8 @@ class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase): ['dummy'], group='servicechain') config.cfg.CONF.set_override('allow_overlapping_ips', True) - super(ResourceMappingTestCase, self).setUp( - core_plugin=n_test_plugin.PLUGIN_NAME) + super(ResourceMappingTestCase, self).setUp(core_plugin=core_plugin, + ml2_options=ml2_options) engine = db_api.get_engine() model_base.BASEV2.metadata.create_all(engine) res = mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.' @@ -323,6 +324,37 @@ class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase): filter_by(policy_target_group_id=ptg_id). all()) + def _create_service_profile(self, node_type='LOADBALANCER'): + data = {'service_profile': {'service_type': node_type, + 'tenant_id': self._tenant_id}} + scn_req = self.new_create_request(SERVICE_PROFILES, data, self.fmt) + node = self.deserialize(self.fmt, scn_req.get_response(self.ext_api)) + scn_id = node['service_profile']['id'] + return scn_id + + def _create_servicechain_node(self, node_type="LOADBALANCER"): + profile_id = self._create_service_profile(node_type) + data = {'servicechain_node': {'service_profile_id': profile_id, + 'tenant_id': self._tenant_id, + 'config': "{}"}} + scn_req = self.new_create_request(SERVICECHAIN_NODES, data, self.fmt) + node = self.deserialize(self.fmt, scn_req.get_response(self.ext_api)) + scn_id = node['servicechain_node']['id'] + return scn_id + + def _create_servicechain_spec(self, node_types=[]): + if not node_types: + node_types = ['LOADBALANCER'] + node_ids = [] + for node_type in node_types: + node_ids.append(self._create_servicechain_node(node_type)) + data = 
{'servicechain_spec': {'tenant_id': self._tenant_id, + 'nodes': node_ids}} + scs_req = self.new_create_request(SERVICECHAIN_SPECS, data, self.fmt) + spec = self.deserialize(self.fmt, scs_req.get_response(self.ext_api)) + scs_id = spec['servicechain_spec']['id'] + return scs_id + class TestPolicyTarget(ResourceMappingTestCase): @@ -1480,37 +1512,6 @@ class TestPolicyRuleSet(ResourceMappingTestCase): self._verify_prs_rules(policy_rule_set_id) - def _create_service_profile(self, node_type='LOADBALANCER'): - data = {'service_profile': {'service_type': node_type, - 'tenant_id': self._tenant_id}} - scn_req = self.new_create_request(SERVICE_PROFILES, data, self.fmt) - node = self.deserialize(self.fmt, scn_req.get_response(self.ext_api)) - scn_id = node['service_profile']['id'] - return scn_id - - def _create_servicechain_node(self, node_type="LOADBALANCER"): - profile_id = self._create_service_profile(node_type) - data = {'servicechain_node': {'service_profile_id': profile_id, - 'tenant_id': self._tenant_id, - 'config': "{}"}} - scn_req = self.new_create_request(SERVICECHAIN_NODES, data, self.fmt) - node = self.deserialize(self.fmt, scn_req.get_response(self.ext_api)) - scn_id = node['servicechain_node']['id'] - return scn_id - - def _create_servicechain_spec(self, node_types=[]): - if not node_types: - node_types = ['LOADBALANCER'] - node_ids = [] - for node_type in node_types: - node_ids.append(self._create_servicechain_node(node_type)) - data = {'servicechain_spec': {'tenant_id': self._tenant_id, - 'nodes': node_ids}} - scs_req = self.new_create_request(SERVICECHAIN_SPECS, data, self.fmt) - spec = self.deserialize(self.fmt, scs_req.get_response(self.ext_api)) - scs_id = spec['servicechain_spec']['id'] - return scs_id - def _create_provider_consumer_ptgs(self, prs_id=None): policy_rule_set_dict = {prs_id: None} if prs_id else {} provider_ptg = self.create_policy_target_group( diff --git a/requirements.txt b/requirements.txt index 63fe3f9ae..dc488f144 100644 --- 
a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. + oslosphinx>=2.5.0,<2.6.0 # Apache-2.0 diff --git a/test-requirements.txt b/test-requirements.txt index dbf73dfb7..402a8e06d 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -20,3 +20,5 @@ requests-mock>=0.5.1 # Apache-2.0 testrepository>=0.0.18 testtools>=0.9.36,!=1.2.0 WebTest>=2.0 + +-e git+https://github.com/noironetworks/python-opflex-agent.git#egg=opflexagent