Cleanup Queens
Remove code no longer needed now that stable/queens is the oldest supported branch, and enhance compatibility with newer neutron branches. Highlights include:

* Use definitions from neutron_lib where appropriate.
* Add gbpservice.neutron.db.api wrapper for definitions in neutron_lib.db.api in stable/rocky and newer that aren't moved yet in stable/queens, so that most DB code can remain unchanged across branches. In particular, this replaces db_api.context_manager.reader with db_api.CONTEXT_READER and db_api.context_manager.writer with db_api.CONTEXT_WRITER.
* Eliminate some DeprecationWarning messages, such as those due to 'tenant' being renamed 'project'.
* [AIM] Remove validation tool support for migrating SNAT resources from the legacy ACI plugin.
* [AIM] Fix make_port_context to use a CONTEXT_WRITER instead of a CONTEXT_READER, since it can call get_network, which uses a CONTEXT_WRITER, and transactions cannot be upgraded from read-only to read-write.
* [AIM] Change UTs to use make_port_context instead of get_bound_port_context, to prevent trying to bind the port if it's not already bound.
* [AIM] Remove quota-related monkey-patching that is no longer needed due to previous technical debt cleanup.
* [AIM] Use the registry.receives decorator instead of calling registry.subscribe.
* [AIM] Fix TypeError in _agent_bind_port.
* [AIM] Simplify _net_2_epg function in test_apic_aim UT module.

Change-Id: I01d57debd6884032267a6b7675883b6d1e61afcc
parent dc1f262a6d
commit 7f460160d6
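Most of the diff below is a mechanical switch from the old per-branch enginefacade accessors to the module-level context managers re-exported by the new gbpservice.neutron.db.api wrapper. A minimal before/after sketch of that pattern; delete_widget and _get_widget are illustrative stand-ins, not code from this commit:

# Before (relies on neutron.db.api, which is removed in stein):
#
#     from neutron.db import api as db_api
#
#     with db_api.context_manager.writer.using(context):
#         ...
#
# After, via the wrapper added by this commit:
from gbpservice.neutron.db import api as db_api


def delete_widget(plugin, context, widget_id):
    # CONTEXT_WRITER is an alias for get_context_manager().writer, so the
    # transaction semantics are unchanged; only the spelling differs.
    with db_api.CONTEXT_WRITER.using(context):
        widget_db = plugin._get_widget(context, widget_id)
        context.session.delete(widget_db)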
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import sys
from neutron_lib import context as n_context

@@ -25,18 +24,6 @@ LOG = logging.getLogger(__name__)
cfg.CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token')
# REVISIT: Avoid using this in new code, and remove it when no longer
# needed. Neutron and GBP REST API methods should not be called from
# within transactions.
@contextlib.contextmanager
def transaction_guard_disabled(context):
try:
context.GUARD_TRANSACTION = False
yield
finally:
context.GUARD_TRANSACTION = True
def get_function_local_from_stack(function, local):
frame = sys._getframe()
while frame:

@@ -18,7 +18,7 @@ from gbpservice.nfp.orchestrator.openstack import openstack_driver
from neutron.common import constants as n_constants
from neutron.common import rpc as n_rpc
from neutron.common import topics as n_topics
from neutron_lib.agent import topics as n_topics
import oslo_messaging as messaging

@@ -14,7 +14,6 @@
from neutron.extensions import securitygroup as ext_sg
from neutron.notifiers import nova
from neutron import quota
from neutron_lib import constants as nl_const
from neutron_lib import exceptions as n_exc
from neutron_lib.exceptions import address_scope as as_exc
from neutron_lib.exceptions import l3

@@ -47,7 +46,7 @@ class LocalAPI(object):
def _l3_plugin(self):
# REVISIT(rkukura): Need initialization method after all
# plugins are loaded to grab and store plugin.
l3_plugin = directory.get_plugin(nl_const.L3)
l3_plugin = directory.get_plugin(pconst.L3)
if not l3_plugin:
LOG.error("No L3 router service plugin found.")
raise exc.GroupPolicyDeploymentError()
gbpservice/neutron/db/api.py (Normal file, 33 lines added)
@@ -0,0 +1,33 @@
# Copyright (c) 2020 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# REVISIT: Eliminate this module as soon as definitions from
# neutron.db.api, which is eliminated in stein, are no longer
# needed. Please DO NOT add any definition to this module that is not
# a direct alias of a definition in the version of neutron_lib.db.api
# corresponding to the newest neutron branch supported by this
# repository.

from neutron.db import api as old_api
from neutron_lib.db import api

get_context_manager = api.get_context_manager
get_reader_session = api.get_reader_session
get_writer_session = api.get_writer_session
is_retriable = old_api.is_retriable
retry_db_errors = old_api.retry_db_errors
retry_if_session_inactive = old_api.retry_if_session_inactive
CONTEXT_READER = get_context_manager().reader
CONTEXT_WRITER = get_context_manager().writer
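Since every name in this module is a direct alias, DB code written against the wrapper runs unchanged on stable/queens (where some definitions still live in neutron.db.api) and on stable/rocky and newer (where they live in neutron_lib.db.api). A minimal usage sketch; the function and query are illustrative only, not part of the commit:

from gbpservice.neutron.db import api as db_api
from neutron.db import models_v2


@db_api.retry_if_session_inactive()
def count_ports_on_network(context, network_id):
    # CONTEXT_READER wraps the same enginefacade reader Neutron uses, and
    # retry_if_session_inactive is re-exported unchanged, so this composes
    # with surrounding reader/writer transactions on either branch.
    with db_api.CONTEXT_READER.using(context):
        return context.session.query(models_v2.Port).filter_by(
            network_id=network_id).count()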
@@ -12,7 +12,6 @@
import netaddr
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron_lib.api import validators
from neutron_lib import constants

@@ -24,6 +23,7 @@ import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.services.grouppolicy.common import (
constants as gp_constants)

@@ -1099,7 +1099,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def create_policy_target(self, context, policy_target):
pt = policy_target['policy_target']
tenant_id = self._get_tenant_id_for_create(context, pt)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pt_db = PolicyTarget(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=pt['name'], description=pt['description'],

@@ -1113,14 +1113,14 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def update_policy_target(self, context, policy_target_id, policy_target):
pt = policy_target['policy_target']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pt_db = self._get_policy_target(context, policy_target_id)
pt_db.update(pt)
return self._make_policy_target_dict(pt_db)
@log.log_method_call
def delete_policy_target(self, context, policy_target_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pt_db = self._get_policy_target(context, policy_target_id)
context.session.delete(pt_db)

@@ -1151,7 +1151,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def create_policy_target_group(self, context, policy_target_group):
ptg = policy_target_group['policy_target_group']
tenant_id = self._get_tenant_id_for_create(context, ptg)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
if ptg['service_management']:
self._validate_service_management_ptg(context, tenant_id)
ptg_db = PolicyTargetGroup(

@@ -1173,7 +1173,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def update_policy_target_group(self, context, policy_target_group_id,
policy_target_group):
ptg = policy_target_group['policy_target_group']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
ptg_db = self._get_policy_target_group(
context, policy_target_group_id)
ptg = self._process_policy_rule_sets_for_ptg(context, ptg_db, ptg)

@@ -1182,7 +1182,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def delete_policy_target_group(self, context, policy_target_group_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
ptg_db = self._get_policy_target_group(
context, policy_target_group_id)
# REVISIT(rkukura): An exception should be raised here if

@@ -1227,7 +1227,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
application_policy_group):
apg = application_policy_group['application_policy_group']
tenant_id = self._get_tenant_id_for_create(context, apg)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
apg_db = ApplicationPolicyGroup(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=apg['name'], description=apg['description'],

@@ -1242,7 +1242,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
application_policy_group_id,
application_policy_group):
apg = application_policy_group['application_policy_group']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
apg_db = self._get_application_policy_group(
context, application_policy_group_id)
apg_db.update(apg)

@@ -1251,7 +1251,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def delete_application_policy_group(self, context,
application_policy_group_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
apg_db = self._get_application_policy_group(
context, application_policy_group_id)
context.session.delete(apg_db)

@@ -1285,7 +1285,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def create_l2_policy(self, context, l2_policy):
l2p = l2_policy['l2_policy']
tenant_id = self._get_tenant_id_for_create(context, l2p)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
l2p_db = L2Policy(id=uuidutils.generate_uuid(),
tenant_id=tenant_id, name=l2p['name'],
description=l2p['description'],

@@ -1301,14 +1301,14 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def update_l2_policy(self, context, l2_policy_id, l2_policy):
l2p = l2_policy['l2_policy']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
l2p_db = self._get_l2_policy(context, l2_policy_id)
l2p_db.update(l2p)
return self._make_l2_policy_dict(l2p_db)
@log.log_method_call
def delete_l2_policy(self, context, l2_policy_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
l2p_db = self._get_l2_policy(context, l2_policy_id)
# When delete_l2_policy is called implicitly (as a
# side effect of the last PTG deletion), the L2P's

@@ -1351,7 +1351,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
self.validate_subnet_prefix_length(
l3p['ip_version'], l3p['subnet_prefix_length'],
l3p.get('ip_pool', None))
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = L3Policy(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id, name=l3p['name'],

@@ -1371,7 +1371,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def update_l3_policy(self, context, l3_policy_id, l3_policy):
l3p = l3_policy['l3_policy']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = self._get_l3_policy(context, l3_policy_id)
if 'subnet_prefix_length' in l3p:
self.validate_subnet_prefix_length(

@@ -1386,7 +1386,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def delete_l3_policy(self, context, l3_policy_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = self._get_l3_policy(context, l3_policy_id)
if l3p_db.l2_policies:
raise gpolicy.L3PolicyInUse(l3_policy_id=l3_policy_id)

@@ -1419,7 +1419,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def create_network_service_policy(self, context, network_service_policy):
nsp = network_service_policy['network_service_policy']
tenant_id = self._get_tenant_id_for_create(context, nsp)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
nsp_db = NetworkServicePolicy(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=nsp['name'],

@@ -1437,7 +1437,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def update_network_service_policy(
self, context, network_service_policy_id, network_service_policy):
nsp = network_service_policy['network_service_policy']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
nsp_db = self._get_network_service_policy(
context, network_service_policy_id)
if 'network_service_params' in network_service_policy:

@@ -1449,7 +1449,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def delete_network_service_policy(
self, context, network_service_policy_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
nsp_db = self._get_network_service_policy(
context, network_service_policy_id)
if nsp_db.policy_target_groups:

@@ -1488,7 +1488,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
tenant_id = self._get_tenant_id_for_create(context, pc)
port_min, port_max = GroupPolicyDbPlugin._get_min_max_ports_from_range(
pc['port_range'])
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pc_db = PolicyClassifier(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=pc['name'],

@@ -1508,7 +1508,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def update_policy_classifier(self, context, policy_classifier_id,
policy_classifier):
pc = policy_classifier['policy_classifier']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pc_db = self._get_policy_classifier(context, policy_classifier_id)
if 'port_range' in pc:
port_min, port_max = (GroupPolicyDbPlugin.

@@ -1522,7 +1522,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def delete_policy_classifier(self, context, policy_classifier_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pc_db = self._get_policy_classifier(context, policy_classifier_id)
pc_ids = self._get_policy_classifier_rules(context,
policy_classifier_id)

@@ -1559,7 +1559,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def create_policy_action(self, context, policy_action):
pa = policy_action['policy_action']
tenant_id = self._get_tenant_id_for_create(context, pa)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pa_db = PolicyAction(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=pa['name'],

@@ -1576,14 +1576,14 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def update_policy_action(self, context, policy_action_id, policy_action):
pa = policy_action['policy_action']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pa_db = self._get_policy_action(context, policy_action_id)
pa_db.update(pa)
return self._make_policy_action_dict(pa_db)
@log.log_method_call
def delete_policy_action(self, context, policy_action_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pa_db = self._get_policy_action(context, policy_action_id)
pa_ids = self._get_policy_action_rules(context, policy_action_id)
if pa_ids:

@@ -1618,7 +1618,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def create_policy_rule(self, context, policy_rule):
pr = policy_rule['policy_rule']
tenant_id = self._get_tenant_id_for_create(context, pr)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pr_db = PolicyRule(id=uuidutils.generate_uuid(),
tenant_id=tenant_id, name=pr['name'],
description=pr['description'],

@@ -1635,7 +1635,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def update_policy_rule(self, context, policy_rule_id, policy_rule):
pr = policy_rule['policy_rule']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pr_db = self._get_policy_rule(context, policy_rule_id)
if 'policy_actions' in pr:
self._set_actions_for_rule(context, pr_db,

@@ -1646,7 +1646,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def delete_policy_rule(self, context, policy_rule_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
pr_db = self._get_policy_rule(context, policy_rule_id)
prs_ids = self._get_policy_rule_policy_rule_sets(context,
policy_rule_id)

@@ -1681,7 +1681,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def create_policy_rule_set(self, context, policy_rule_set):
prs = policy_rule_set['policy_rule_set']
tenant_id = self._get_tenant_id_for_create(context, prs)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
prs_db = PolicyRuleSet(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=prs['name'],

@@ -1700,7 +1700,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def update_policy_rule_set(self, context, policy_rule_set_id,
policy_rule_set):
prs = policy_rule_set['policy_rule_set']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
prs_db = self._get_policy_rule_set(context, policy_rule_set_id)
if 'policy_rules' in prs:
self._set_rules_for_policy_rule_set(

@@ -1715,7 +1715,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def delete_policy_rule_set(self, context, policy_rule_set_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
prs_db = self._get_policy_rule_set(context, policy_rule_set_id)
prs_ids = (
self._get_ptgs_for_providing_policy_rule_set(

@@ -1759,7 +1759,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def create_external_policy(self, context, external_policy):
ep = external_policy['external_policy']
tenant_id = self._get_tenant_id_for_create(context, ep)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
ep_db = ExternalPolicy(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=ep['name'], description=ep['description'],

@@ -1777,7 +1777,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def update_external_policy(self, context, external_policy_id,
external_policy):
ep = external_policy['external_policy']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
ep_db = self._get_external_policy(
context, external_policy_id)
if 'external_segments' in ep:

@@ -1814,7 +1814,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def delete_external_policy(self, context, external_policy_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
ep_db = self._get_external_policy(
context, external_policy_id)
context.session.delete(ep_db)

@@ -1823,7 +1823,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def create_external_segment(self, context, external_segment):
es = external_segment['external_segment']
tenant_id = self._get_tenant_id_for_create(context, es)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
es_db = ExternalSegment(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=es['name'], description=es['description'],

@@ -1841,7 +1841,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def update_external_segment(self, context, external_segment_id,
external_segment):
es = external_segment['external_segment']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
es_db = self._get_external_segment(
context, external_segment_id)
if 'external_routes' in es:

@@ -1876,7 +1876,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def delete_external_segment(self, context, external_segment_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
es_db = self._get_external_segment(
context, external_segment_id)
context.session.delete(es_db)

@@ -1885,7 +1885,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
def create_nat_pool(self, context, nat_pool):
np = nat_pool['nat_pool']
tenant_id = self._get_tenant_id_for_create(context, np)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
np_db = NATPool(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=np['name'], description=np['description'],

@@ -1900,7 +1900,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def update_nat_pool(self, context, nat_pool_id, nat_pool):
np = nat_pool['nat_pool']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
np_db = self._get_nat_pool(
context, nat_pool_id)
np_db.update(np)

@@ -1930,6 +1930,6 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase,
@log.log_method_call
def delete_nat_pool(self, context, nat_pool_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
np_db = self._get_nat_pool(context, nat_pool_id)
context.session.delete(np_db)
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import api as db_api
from neutron.db import models_v2
from neutron_lib.db import model_base
from neutron_lib import exceptions as nexc

@@ -21,6 +20,7 @@ from sqlalchemy import orm
from gbpservice._i18n import _
from gbpservice.common import utils as gbp_utils
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.services.grouppolicy.common import exceptions

@@ -429,7 +429,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
def create_policy_target(self, context, policy_target):
pt = policy_target['policy_target']
tenant_id = self._get_tenant_id_for_create(context, pt)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
self._validate_pt_port_exta_attributes(context, pt)
pt_db = PolicyTargetMapping(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,

@@ -465,7 +465,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
def create_policy_target_group(self, context, policy_target_group):
ptg = policy_target_group['policy_target_group']
tenant_id = self._get_tenant_id_for_create(context, ptg)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
if ptg['service_management']:
self._validate_service_management_ptg(context, tenant_id)
uuid = ptg.get('id')

@@ -495,7 +495,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
def update_policy_target_group(self, context, policy_target_group_id,
policy_target_group):
ptg = policy_target_group['policy_target_group']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
ptg_db = self._get_policy_target_group(
context, policy_target_group_id)
self._process_policy_rule_sets_for_ptg(context, ptg_db, ptg)

@@ -550,7 +550,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
def create_l2_policy(self, context, l2_policy):
l2p = l2_policy['l2_policy']
tenant_id = self._get_tenant_id_for_create(context, l2p)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
l2p_db = L2PolicyMapping(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=l2p['name'],

@@ -591,7 +591,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
self.validate_subnet_prefix_length(l3p['ip_version'],
l3p['subnet_prefix_length'],
l3p.get('ip_pool', None))
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = L3PolicyMapping(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=l3p['name'],

@@ -633,7 +633,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
if 'address_scope_v4_id' in l3p or 'address_scope_v6_id' in l3p:
raise AddressScopeUpdateForL3PNotSupported()
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = self._get_l3_policy(context, l3_policy_id)
self._update_subnetpools_for_l3_policy(context, l3_policy_id,

@@ -679,7 +679,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
def create_external_segment(self, context, external_segment):
es = external_segment['external_segment']
tenant_id = self._get_tenant_id_for_create(context, es)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
es_db = ExternalSegmentMapping(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=es['name'], description=es['description'],

@@ -713,7 +713,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
def create_nat_pool(self, context, nat_pool):
np = nat_pool['nat_pool']
tenant_id = self._get_tenant_id_for_create(context, np)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
np_db = NATPoolMapping(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=np['name'], description=np['description'],
@@ -12,7 +12,6 @@
import ast
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron_lib.db import model_base
from neutron_lib import exceptions as n_exc

@@ -27,6 +26,7 @@ from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm
from sqlalchemy.orm import exc
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.extensions import servicechain as schain
from gbpservice.neutron.services.servicechain.common import exceptions as s_exc

@@ -259,7 +259,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
def create_servicechain_node(self, context, servicechain_node):
node = servicechain_node['servicechain_node']
tenant_id = self._get_tenant_id_for_create(context, node)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
node_db = ServiceChainNode(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=node['name'], description=node['description'],

@@ -275,7 +275,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
def update_servicechain_node(self, context, servicechain_node_id,
servicechain_node, set_params=False):
node = servicechain_node['servicechain_node']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
node_db = self._get_servicechain_node(context,
servicechain_node_id)
node_db.update(node)

@@ -291,7 +291,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
@log.log_method_call
def delete_servicechain_node(self, context, servicechain_node_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
node_db = self._get_servicechain_node(context,
servicechain_node_id)
if node_db.specs:

@@ -426,7 +426,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
set_params=True):
spec = servicechain_spec['servicechain_spec']
tenant_id = self._get_tenant_id_for_create(context, spec)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
spec_db = ServiceChainSpec(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=spec['name'],

@@ -444,7 +444,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
def update_servicechain_spec(self, context, spec_id,
servicechain_spec, set_params=True):
spec = servicechain_spec['servicechain_spec']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
spec_db = self._get_servicechain_spec(context,
spec_id)
spec = self._process_nodes_for_spec(context, spec_db, spec,

@@ -458,7 +458,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
context, filters={"action_value": [spec_id]})
if policy_actions:
raise schain.ServiceChainSpecInUse(spec_id=spec_id)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
spec_db = self._get_servicechain_spec(context,
spec_id)
if spec_db.instances:

@@ -493,7 +493,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
def create_servicechain_instance(self, context, servicechain_instance):
instance = servicechain_instance['servicechain_instance']
tenant_id = self._get_tenant_id_for_create(context, instance)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
if not instance.get('management_ptg_id'):
management_groups = (
self._grouppolicy_plugin.get_policy_target_groups(

@@ -525,7 +525,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
def update_servicechain_instance(self, context, servicechain_instance_id,
servicechain_instance):
instance = servicechain_instance['servicechain_instance']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
instance_db = self._get_servicechain_instance(
context, servicechain_instance_id)
instance = self._process_specs_for_instance(context, instance_db,

@@ -535,7 +535,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
@log.log_method_call
def delete_servicechain_instance(self, context, servicechain_instance_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
instance_db = self._get_servicechain_instance(
context, servicechain_instance_id)
context.session.delete(instance_db)

@@ -572,7 +572,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
def create_service_profile(self, context, service_profile):
profile = service_profile['service_profile']
tenant_id = self._get_tenant_id_for_create(context, profile)
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
profile_db = ServiceProfile(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=profile['name'], description=profile['description'],

@@ -590,7 +590,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
def update_service_profile(self, context, service_profile_id,
service_profile):
profile = service_profile['service_profile']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
profile_db = self._get_service_profile(context,
service_profile_id)
profile_db.update(profile)

@@ -598,7 +598,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
@log.log_method_call
def delete_service_profile(self, context, service_profile_id):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
profile_db = self._get_service_profile(context,
service_profile_id)
if profile_db.nodes:
@@ -14,7 +14,6 @@ import abc
import re
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron_lib.api import converters as conv
from neutron_lib.api import extensions

@@ -496,8 +495,9 @@ RESOURCE_ATTRIBUTE_MAP = {
'network_service_policy_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'default': None, 'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False,
'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
'service_management': {'allow_post': True, 'allow_put': True,

@@ -527,8 +527,9 @@ RESOURCE_ATTRIBUTE_MAP = {
'validate': {'type:uuid_list': None},
'convert_to': conv.convert_none_to_empty_list,
'default': None, 'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False,
'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
},

@@ -561,8 +562,9 @@ RESOURCE_ATTRIBUTE_MAP = {
'default': True, 'is_visible': True,
'convert_to': conv.convert_to_boolean,
'required': False},
attr.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False,
'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
# TODO(Sumit): uncomment when supported in data path

@@ -608,8 +610,9 @@ RESOURCE_ATTRIBUTE_MAP = {
'validate': {'type:uuid_list': None},
'convert_to': conv.convert_none_to_empty_list,
'default': None, 'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False,
'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
'external_segments': {

@@ -646,8 +649,9 @@ RESOURCE_ATTRIBUTE_MAP = {
'validate': {'type:values': gp_supported_directions},
'default': gp_constants.GP_DIRECTION_BI,
'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False,
'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
},

@@ -677,7 +681,7 @@ RESOURCE_ATTRIBUTE_MAP = {
'action_value': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'default': None, 'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},

@@ -709,7 +713,7 @@ RESOURCE_ATTRIBUTE_MAP = {
'default': None, 'is_visible': True,
'validate': {'type:uuid_list': None},
'convert_to': conv.convert_none_to_empty_list},
attr.SHARED: {'allow_post': True, 'allow_put': True,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},

@@ -758,7 +762,7 @@ RESOURCE_ATTRIBUTE_MAP = {
'providing_external_policies': {
'allow_post': False, 'allow_put': False, 'default': None,
'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},

@@ -788,7 +792,7 @@ RESOURCE_ATTRIBUTE_MAP = {
'validate':
{'type:network_service_params': None},
'default': None, 'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},

@@ -824,7 +828,7 @@ RESOURCE_ATTRIBUTE_MAP = {
'convert_to':
conv.convert_none_to_empty_dict,
'default': None, 'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},

@@ -876,7 +880,7 @@ RESOURCE_ATTRIBUTE_MAP = {
'validate': {'type:uuid_list': None},
'default': [],
'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},

@@ -908,7 +912,7 @@ RESOURCE_ATTRIBUTE_MAP = {
'external_segment_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'required': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
@@ -14,24 +14,21 @@ import copy
from neutron.api import extensions
from neutron.api.v2 import resource as neutron_resource
from neutron.db import address_scope_db
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db
from neutron.objects import subnetpool as subnetpool_obj
from neutron.plugins.ml2 import db as ml2_db
from neutron.quota import resource as quota_resource
from neutron_lib.api import attributes
from neutron_lib.api import validators
from neutron_lib import exceptions
from neutron_lib.exceptions import address_scope as as_exc
from neutron_lib.plugins import directory
from oslo_log import log
from oslo_utils import excutils
from gbpservice.common import utils as gbp_utils
from gbpservice.neutron.db import api as db_api
LOG = log.getLogger(__name__)

@@ -149,25 +146,6 @@ common_db_mixin.CommonDbMixin._get_tenant_id_for_create = (
_get_tenant_id_for_create)
# REVISIT: In ocata, the switch to new engine facade in neutron is partial.
# This can result in different facades being mixed up within same transaction,
# and inconsistent behavior. Specifically, when L3 policy is deleted,
# subnetpool is deleted (old facade), and address scope (new facade) fails to
# be deleted since the dependent subnetpool deletion is in different session
# that is not yet commited. The workaround is to switch address scope to old
# engine facade. This workaround should be removed in Pike.
def _delete_address_scope(self, context, id):
with context.session.begin(subtransactions=True):
if subnetpool_obj.SubnetPool.get_objects(context,
address_scope_id=id):
raise as_exc.AddressScopeInUse(address_scope_id=id)
address_scope = self._get_address_scope(context, id)
address_scope.delete()
address_scope_db.AddressScopeDbMixin.delete_address_scope = (
_delete_address_scope)
def extend_resources(self, version, attr_map):
"""Extend resources with additional resources or attributes.

@@ -362,7 +340,7 @@ try:
self._check_ip_prefix_valid(destination_ip_prefix, ethertype)
logical_source_port = fc['logical_source_port']
logical_destination_port = fc['logical_destination_port']
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
if logical_source_port is not None:
self._get_port(context, logical_source_port)
if logical_destination_port is not None:
@@ -13,11 +13,11 @@
import abc
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron_lib.api import converters as conv
from neutron_lib.api import extensions
from neutron_lib.api import validators as valid
from neutron_lib import constants as nlib_const
from neutron_lib import exceptions as nexc
from neutron_lib.plugins import constants
from neutron_lib.services import base as service_base

@@ -134,8 +134,9 @@ RESOURCE_ATTRIBUTE_MAP = {
'config': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'required': True, 'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False,
'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
},

@@ -164,8 +165,9 @@ RESOURCE_ATTRIBUTE_MAP = {
'config_param_names': {'allow_post': False, 'allow_put': False,
'validate': {'type:string_list': None},
'default': [], 'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False,
'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
},

@@ -228,8 +230,9 @@ RESOURCE_ATTRIBUTE_MAP = {
'is_visible': True},
'status_details': {'allow_post': False, 'allow_put': False,
'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': conv.convert_to_boolean,
nlib_const.SHARED: {'allow_post': True, 'allow_put': True,
'default': False,
'convert_to': conv.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
'vendor': {'allow_post': True, 'allow_put': True,
@@ -40,15 +40,8 @@ class ProjectDetailsCache(object):
self.project_details = {}
self.keystone = None
self.gbp = None
# This is needed for the legacy GBP plugin, which also
# uses the cache. This can be reverted once newton support
# is dropped
if hasattr(cfg.CONF, 'ml2_apic_aim'):
ml2_cfg = cfg.CONF.ml2_apic_aim
self.enable_neutronclient_internal_ep_interface = (
ml2_cfg.enable_neutronclient_internal_ep_interface)
else:
self.enable_neutronclient_internal_ep_interface = False
cfg.CONF.ml2_apic_aim.enable_neutronclient_internal_ep_interface)
def _get_keystone_client(self):
# REVISIT: It seems load_from_conf_options() and
@@ -56,7 +56,7 @@ apic_opts = [
help=("The pool of IPs where we allocate the APIC "
"router ID from while creating the SVI interface.")),
cfg.DictOpt('migrate_ext_net_dns', default={},
help="DNs for external networks being migrated from legacy "
help="DNs for external networks being migrated from other "
"plugin, formatted as a dictionary mapping Neutron external "
"network IDs (UUIDs) to ACI external network distinguished "
"names."),
@@ -14,7 +14,6 @@
# under the License.
from aim.api import resource as aim_resource
from neutron.db import api as db_api
from neutron.db.models import address_scope as as_db
from neutron.db import models_v2
from neutron_lib import context as n_context

@@ -25,6 +24,8 @@ import sqlalchemy as sa
from sqlalchemy.ext import baked
from sqlalchemy import orm
from gbpservice.neutron.db import api as db_api
VM_UPDATE_PURPOSE = 'VmUpdate'
LOG = log.getLogger(__name__)
@@ -16,12 +16,12 @@
from aim.api import resource as aim_res
from aim import exceptions as aim_exc
from neutron.api import extensions
from neutron.db import api as db_api
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from oslo_log import log
from oslo_utils import excutils
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron import extensions as extensions_pkg
from gbpservice.neutron.extensions import cisco_apic
from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus
@@ -35,9 +35,7 @@ from aim import exceptions as aim_exceptions
from aim import utils as aim_utils
from neutron.agent import securitygroups_rpc
from neutron.common import rpc as n_rpc
from neutron.common import topics as n_topics
from neutron.common import utils as n_utils
from neutron.db import api as db_api
from neutron.db.models import address_scope as as_db
from neutron.db.models import allowed_address_pair as n_addr_pair_db
from neutron.db.models import l3 as l3_db

@@ -54,6 +52,7 @@ from neutron.plugins.ml2.drivers.openvswitch.agent.common import (
from neutron.plugins.ml2 import models
from neutron.services.trunk import constants as trunk_consts
from neutron.services.trunk import exceptions as trunk_exc
from neutron_lib.agent import topics as n_topics
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import trunk

@@ -64,6 +63,7 @@ from neutron_lib.callbacks import resources
from neutron_lib import constants as n_constants
from neutron_lib import context as nctx
from neutron_lib import exceptions as n_exceptions
from neutron_lib.plugins import constants as pconst
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from neutron_lib.utils import net

@@ -77,6 +77,7 @@ from oslo_service import loopingcall
from oslo_utils import importutils
from gbpservice.common import utils as gbp_utils
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.extensions import cisco_apic
from gbpservice.neutron.extensions import cisco_apic_l3 as a_l3
from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus

@@ -130,10 +131,6 @@ NO_ADDR_SCOPE = object()
DVS_AGENT_KLASS = 'networking_vsphere.common.dvs_agent_rpc_api.DVSClientAPI'
DEFAULT_HOST_DOMAIN = '*'
LEGACY_SNAT_NET_NAME_PREFIX = 'host-snat-network-for-internal-use-'
LEGACY_SNAT_SUBNET_NAME = 'host-snat-pool-for-internal-use'
LEGACY_SNAT_PORT_NAME = 'host-snat-pool-port-for-internal-use'
LEGACY_SNAT_PORT_DEVICE_OWNER = 'host-snat-pool-port-device-owner-internal-use'
LL_INFO = 'local_link_information'
# TODO(kentwu): Move this to AIM utils maybe to avoid adding too much

@@ -294,7 +291,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
def _update_nova_vm_name_cache(self):
current_time = datetime.now()
context = nctx.get_admin_context()
with db_api.context_manager.reader.using(context) as session:
with db_api.CONTEXT_READER.using(context) as session:
vm_name_update = self._get_vm_name_update(session)
is_full_update = True
if vm_name_update:

@@ -319,7 +316,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
return
try:
with db_api.context_manager.writer.using(context) as session:
with db_api.CONTEXT_WRITER.using(context) as session:
self._set_vm_name_update(
session, vm_name_update, self.host_id, current_time,
current_time if is_full_update else None)

@@ -334,7 +331,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
vm_list.append((vm.id, vm.name))
nova_vms = set(vm_list)
with db_api.context_manager.writer.using(context) as session:
with db_api.CONTEXT_WRITER.using(context) as session:
cached_vms = self._get_vm_names(session)
cached_vms = set(cached_vms)

@@ -611,8 +608,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# TODO(rkukura): Move the following to calls made from
# precommit methods so AIM Tenants, ApplicationProfiles, and
# Filters are [re]created whenever needed.
with db_api.context_manager.writer.using(plugin_context):
session = plugin_context.session
with db_api.CONTEXT_WRITER.using(plugin_context) as session:
tenant_aname = self.name_mapper.project(session, project_id)
project_details = (self.project_details_cache.
get_project_details(project_id))

@@ -2524,7 +2520,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
removed = list(set(orig_ips) - set(curr_ips))
for aap in removed:
cidr = netaddr.IPNetwork(aap)
with db_api.context_manager.writer.using(p_context) as session:
with db_api.CONTEXT_WRITER.using(p_context) as session:
# Get all the owned IP addresses for the port, and if
# they match a removed AAP entry, delete that entry
# from the DB

@@ -2573,7 +2569,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
return
# Get the static ports for the new binding.
with db_api.context_manager.reader.using(context):
with db_api.CONTEXT_READER.using(context):
static_ports = self._get_static_ports(
context, bind_context.host, bind_context.bottom_bound_segment,
port_context=bind_context)

@@ -2916,7 +2912,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
pod_id, port_description)]))
if not switch:
return
with db_api.context_manager.writer.using(context) as session:
with db_api.CONTEXT_WRITER.using(context) as session:
aim_ctx = aim_context.AimContext(db_session=session)
hlink = self.aim.get(aim_ctx,
aim_infra.HostLink(host_name=host,

@@ -2942,7 +2938,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
LOG.debug('Topology RPC: delete_link: %s',
', '.join([str(p) for p in
(host, interface, mac, switch, module, port)]))
with db_api.context_manager.writer.using(context) as session:
with db_api.CONTEXT_WRITER.using(context) as session:
aim_ctx = aim_context.AimContext(db_session=session)
hlink = self.aim.get(aim_ctx,
aim_infra.HostLink(host_name=host,

@@ -2963,7 +2959,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# this is all good in theory, it would require some extra design
# due to the fact that VPC interfaces have the same path but
# two different ifaces assigned to them.
with db_api.context_manager.writer.using(context) as session:
with db_api.CONTEXT_WRITER.using(context) as session:
aim_ctx = aim_context.AimContext(db_session=session)
hlinks = self.aim.find(aim_ctx, aim_infra.HostLink, host_name=host)
nets_segs = self._get_non_opflex_segments_on_host(context, host)

@@ -2977,7 +2973,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
def _agent_bind_port(self, context, agent_type, bind_strategy):
current = context.current
for agent in context.host_agents(agent_type):
for agent in context.host_agents(agent_type) or []:
LOG.debug("Checking agent: %s", agent)
if agent['alive']:
for segment in context.segments_to_bind:
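The one-line change to _agent_bind_port above guards against host_agents() evaluating to a non-iterable value, which is the source of the TypeError noted in the commit message. A hedged sketch of the guard in isolation (the helper name is illustrative):

def _iter_live_agents(context, agent_type):
    # context.host_agents() can evidently return None in some cases;
    # iterating None raises TypeError, so "or []" degrades to an empty loop
    # and binding simply falls through to the next strategy.
    for agent in context.host_agents(agent_type) or []:
        if agent.get('alive'):
            yield agent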
@ -3204,7 +3200,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
|
||||
@property
|
||||
def l3_plugin(self):
|
||||
if not self._l3_plugin:
|
||||
self._l3_plugin = directory.get_plugin(n_constants.L3)
|
||||
self._l3_plugin = directory.get_plugin(pconst.L3)
|
||||
return self._l3_plugin
|
||||
|
||||
@property
|
||||
@ -4209,7 +4205,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
'gateway_ip': <gateway_ip of subnet>,
'prefixlen': <prefix_length_of_subnet>}
"""
with db_api.context_manager.reader.using(plugin_context) as session:
with db_api.CONTEXT_READER.using(plugin_context) as session:
# Query for existing SNAT port.
query = BAKERY(lambda s: s.query(
models_v2.IPAllocation.ip_address,
@ -4318,7 +4314,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,

def _delete_unneeded_snat_ip_ports(self, plugin_context, ext_network_id):
snat_port_ids = []
with db_api.context_manager.reader.using(plugin_context) as session:
with db_api.CONTEXT_READER.using(plugin_context) as session:
# Query for any interfaces of routers with gateway ports
# on this external network.
query = BAKERY(lambda s: s.query(
@ -4459,7 +4455,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
def _rebuild_host_path_for_network(self, plugin_context, network, segment,
host, host_links):
# Look up the static ports for this host and segment.
with db_api.context_manager.reader.using(plugin_context):
with db_api.CONTEXT_READER.using(plugin_context):
static_ports = self._get_static_ports(
plugin_context, host, segment)

@ -4470,7 +4466,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
plugin_context, static_ports, network)

# Rebuild the static paths.
with db_api.context_manager.writer.using(plugin_context) as session:
with db_api.CONTEXT_WRITER.using(plugin_context) as session:
aim_ctx = aim_context.AimContext(db_session=session)
if self._is_svi(network):
l3out, _, _ = self._get_aim_external_objects(network)
@ -5586,7 +5582,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# REVISIT: Callers often only need the bottom bound segment and
# maybe the host, so consider a simpler alternative.
def make_port_context(self, plugin_context, port_id):
with db_api.context_manager.reader.using(plugin_context):
# REVISIT: Use CONTEXT_READER once upstream ML2 get_network no
# longer uses a write transaction. Or call get_network outside
# of a transaction.
with db_api.CONTEXT_WRITER.using(plugin_context):
port_db = self.plugin._get_port(plugin_context, port_id)
port = self.plugin._make_port_dict(port_db)
network = self.plugin.get_network(
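This is the make_port_context fix called out in the commit message: upstream ML2's get_network still opens a write transaction, and oslo.db's enginefacade does not allow a transaction that was started read-only to be upgraded to read-write, so the outer context has to be a writer. A rough illustration of the restriction (assumed enginefacade behaviour, names illustrative):

from neutron_lib.db import api as lib_db_api

def sketch(plugin_context):
    with lib_db_api.CONTEXT_READER.using(plugin_context):
        # Entering a writer while a reader is active on the same context is
        # expected to fail (enginefacade raises a TypeError about upgrading
        # a READER transaction to a WRITER), hence the writer above.
        with lib_db_api.CONTEXT_WRITER.using(plugin_context):
            pass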
@ -5600,31 +5599,26 @@ class ApicMechanismDriver(api_plus.MechanismDriver,

def _add_network_mapping_and_notify(self, context, network_id, bd, epg,
vrf):
with db_api.context_manager.writer.using(context):
self._add_network_mapping(context.session, network_id, bd, epg,
vrf)
with db_api.CONTEXT_WRITER.using(context) as session:
self._add_network_mapping(session, network_id, bd, epg, vrf)
registry.notify(aim_cst.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE,
self, context=context, network_id=network_id)

def _set_network_epg_and_notify(self, context, mapping, epg):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
self._set_network_epg(mapping, epg)
registry.notify(aim_cst.GBP_NETWORK_EPG, events.PRECOMMIT_UPDATE,
self, context=context,
network_id=mapping.network_id)

def _set_network_vrf_and_notify(self, context, mapping, vrf):
with db_api.context_manager.writer.using(context):
with db_api.CONTEXT_WRITER.using(context):
self._set_network_vrf(mapping, vrf)
registry.notify(aim_cst.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE,
self, context=context,
network_id=mapping.network_id)

def validate_aim_mapping(self, mgr):
# First do any cleanup and/or migration of Neutron resources
# used internally by the legacy plugins.
self._validate_legacy_resources(mgr)

# Register all AIM resource types used by mapping.
mgr.register_aim_resource_class(aim_infra.HostDomainMappingV2)
mgr.register_aim_resource_class(aim_resource.ApplicationProfile)
@ -5697,112 +5691,6 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
|
||||
# validation CLI tool, but are baked in order to speed up unit
|
||||
# test execution, where they are called repeatedly.
|
||||
|
||||
def _validate_legacy_resources(self, mgr):
|
||||
# Delete legacy SNAT ports.
|
||||
query = BAKERY(lambda s: s.query(
|
||||
models_v2.Port.id))
|
||||
query += lambda q: q.filter_by(
|
||||
name=LEGACY_SNAT_PORT_NAME,
|
||||
device_owner=LEGACY_SNAT_PORT_DEVICE_OWNER)
|
||||
for port_id, in query(mgr.actual_session):
|
||||
if mgr.should_repair(
|
||||
"legacy APIC driver SNAT port %s" % port_id, "Deleting"):
|
||||
try:
|
||||
# REVISIT: Move outside of transaction.
|
||||
with gbp_utils.transaction_guard_disabled(
|
||||
mgr.actual_context):
|
||||
self.plugin.delete_port(mgr.actual_context, port_id)
|
||||
except n_exceptions.NeutronException as exc:
|
||||
mgr.validation_failed(
|
||||
"deleting legacy APIC driver SNAT port %s failed "
|
||||
"with %s" % (port_id, exc))
|
||||
|
||||
# Delete legacy SNAT subnets.
|
||||
query = BAKERY(lambda s: s.query(
|
||||
models_v2.Subnet.id))
|
||||
query += lambda q: q.filter_by(
|
||||
name=LEGACY_SNAT_SUBNET_NAME)
|
||||
for subnet_id, in query(mgr.actual_session):
|
||||
subnet = self.plugin.get_subnet(mgr.actual_context, subnet_id)
|
||||
net = self.plugin.get_network(
|
||||
mgr.actual_context, subnet['network_id'])
|
||||
net_name = net['name']
|
||||
if net_name and net_name.startswith(LEGACY_SNAT_NET_NAME_PREFIX):
|
||||
ext_net_id = net_name[len(LEGACY_SNAT_NET_NAME_PREFIX):]
|
||||
|
||||
query = BAKERY(lambda s: s.query(
|
||||
models_v2.Network))
|
||||
query += lambda q: q.filter_by(
|
||||
id=sa.bindparam('ext_net_id'))
|
||||
ext_net = query(mgr.actual_session).params(
|
||||
ext_net_id=ext_net_id).one_or_none()
|
||||
|
||||
if ext_net and ext_net.external:
|
||||
if mgr.should_repair(
|
||||
"legacy APIC driver SNAT subnet %s" %
|
||||
subnet['cidr'],
|
||||
"Migrating"):
|
||||
try:
|
||||
del subnet['id']
|
||||
del subnet['project_id']
|
||||
subnet['tenant_id'] = ext_net.project_id
|
||||
subnet['network_id'] = ext_net.id
|
||||
subnet['name'] = 'SNAT host pool'
|
||||
subnet[cisco_apic.SNAT_HOST_POOL] = True
|
||||
# REVISIT: Move outside of transaction.
|
||||
with gbp_utils.transaction_guard_disabled(
|
||||
mgr.actual_context):
|
||||
subnet = self.plugin.create_subnet(
|
||||
mgr.actual_context, {'subnet': subnet})
|
||||
except n_exceptions.NeutronException as exc:
|
||||
mgr.validation_failed(
|
||||
"Migrating legacy APIC driver SNAT subnet %s "
|
||||
"failed with %s" % (subnet['cidr'], exc))
|
||||
if mgr.should_repair(
|
||||
"legacy APIC driver SNAT subnet %s" % subnet_id,
|
||||
"Deleting"):
|
||||
try:
|
||||
# REVISIT: Move outside of transaction.
|
||||
with gbp_utils.transaction_guard_disabled(
|
||||
mgr.actual_context):
|
||||
self.plugin.delete_subnet(
|
||||
mgr.actual_context, subnet_id)
|
||||
except n_exceptions.NeutronException as exc:
|
||||
mgr.validation_failed(
|
||||
"deleting legacy APIC driver SNAT subnet %s failed "
|
||||
"with %s" % (subnet_id, exc))
|
||||
|
||||
# Delete legacy SNAT networks.
|
||||
query = BAKERY(lambda s: s.query(
|
||||
models_v2.Network.id))
|
||||
query += lambda q: q.filter(
|
||||
models_v2.Network.name.startswith(LEGACY_SNAT_NET_NAME_PREFIX))
|
||||
for net_id, in query(mgr.actual_session):
|
||||
if mgr.should_repair(
|
||||
"legacy APIC driver SNAT network %s" % net_id,
|
||||
"Deleting"):
|
||||
try:
|
||||
# REVISIT: Move outside of transaction.
|
||||
with gbp_utils.transaction_guard_disabled(
|
||||
mgr.actual_context):
|
||||
self.plugin.delete_network(mgr.actual_context, net_id)
|
||||
except n_exceptions.NeutronException as exc:
|
||||
mgr.validation_failed(
|
||||
"deleting legacy APIC driver SNAT network %s failed "
|
||||
"with %s" % (net_id, exc))
|
||||
|
||||
# REVISIT: Without this expunge_all call, the
|
||||
# test_legacy_cleanup UT intermittently fails with the
|
||||
# subsequent validation steps attempting to repair missing
|
||||
# subnet extension data, changing the apic:snat_host_pool
|
||||
# value of the migrated SNAT subnet from True to False. The
|
||||
# way the extension_db module creates the SubnetExtensionDb
|
||||
# instance during create_subnet is apparently not updating the
|
||||
# relationship from a cached Subnet instance. Until this issue
|
||||
# is understood and resolved, we expunge all instances from
|
||||
# the session before proceeding.
|
||||
mgr.actual_session.expunge_all()
|
||||
|
||||
def _validate_static_resources(self, mgr):
|
||||
self._ensure_common_tenant(mgr.expected_aim_ctx)
|
||||
self._ensure_unrouted_vrf(mgr.expected_aim_ctx)
|
||||
|
@ -20,7 +20,6 @@ import sqlalchemy as sa
from sqlalchemy.ext import baked

from neutron.common import rpc as n_rpc
from neutron.db import api as db_api
from neutron.db.extra_dhcp_opt import models as dhcp_models
from neutron.db.models import allowed_address_pair as aap_models
from neutron.db.models import dns as dns_models
@ -40,6 +39,7 @@ from oslo_log import log
import oslo_messaging
from oslo_serialization import jsonutils

from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import constants
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import db
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import extension_db
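Only the import changes here; the db_api name now points at the gbpservice.neutron.db.api wrapper described in the commit message, so the CONTEXT_READER/CONTEXT_WRITER usages further down work unchanged on stable/queens as well as newer branches. A rough sketch of what such a wrapper might look like (an assumption about its shape, not the shipped module):

# gbpservice/neutron/db/api.py -- illustrative sketch only
try:
    # stable/rocky and newer: the definitions already live in neutron_lib.
    from neutron_lib.db.api import *  # noqa: F401,F403
    from neutron_lib.db.api import CONTEXT_READER, CONTEXT_WRITER  # noqa: F401
except ImportError:
    # stable/queens: fall back to the names still provided by neutron.db.api.
    from neutron.db.api import *  # noqa: F401,F403
    from neutron.db import api as _db_api
    CONTEXT_READER = _db_api.context_manager.reader
    CONTEXT_WRITER = _db_api.context_manager.writer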
@ -178,7 +178,7 @@ class TopologyRpcEndpoint(object):
class ApicRpcHandlerMixin(object):

def _start_rpc_listeners(self):
conn = n_rpc.create_connection()
conn = n_rpc.Connection()

# Opflex RPC handler.
self._opflex_endpoint = o_rpc.GBPServerRpcCallback(
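create_connection() was a thin factory that simply returned a Connection, and newer Neutron branches deprecate or drop it, so the constructor is called directly; the listener setup around it is unchanged. Typical usage looks roughly like this (topic and endpoint values are placeholders):

from neutron.common import rpc as n_rpc

def start_listeners(endpoints, topic='example-topic'):
    # Same shape as _start_rpc_listeners above: register the endpoints on a
    # topic and return the spawned RPC servers.
    conn = n_rpc.Connection()
    conn.create_consumer(topic, endpoints, fanout=False)
    return conn.consume_in_threads()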
@ -282,7 +282,7 @@ class ApicRpcHandlerMixin(object):
@db_api.retry_if_session_inactive()
def _get_vrf_details(self, context, vrf_id):
vrf_tenant_name, vrf_name = vrf_id.split(' ')
with db_api.context_manager.reader.using(context) as session:
with db_api.CONTEXT_READER.using(context) as session:
vrf_subnets = self._query_vrf_subnets(
session, vrf_tenant_name, vrf_name)
return {
@ -309,7 +309,7 @@ class ApicRpcHandlerMixin(object):
# Start a read-only transaction. Separate read-write
# transactions will be used if needed to bind the port or
# assign SNAT IPs.
with db_api.context_manager.reader.using(context) as session:
with db_api.CONTEXT_READER.using(context) as session:
# Extract possibly truncated port ID from device.
#
# REVISIT: If device identifies the port by its MAC
@ -13,15 +13,14 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from gbpservice.neutron.plugins.ml2plus import driver_api
|
||||
|
||||
from neutron.db import api as db_api
|
||||
from neutron.plugins.ml2.common import exceptions as ml2_exc
|
||||
from neutron.plugins.ml2 import managers
|
||||
from neutron.quota import resource_registry
|
||||
from oslo_log import log
|
||||
from oslo_utils import excutils
|
||||
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.neutron.plugins.ml2plus import driver_api
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
@ -30,25 +29,6 @@ class MechanismManager(managers.MechanismManager):
|
||||
def __init__(self):
|
||||
super(MechanismManager, self).__init__()
|
||||
|
||||
def _call_on_drivers(self, method_name, context,
|
||||
continue_on_failure=False, raise_db_retriable=False):
|
||||
super(MechanismManager, self)._call_on_drivers(
|
||||
method_name, context, continue_on_failure=False,
|
||||
raise_db_retriable=False)
|
||||
if method_name.endswith('_precommit'):
|
||||
# This does the same thing as:
|
||||
# https://github.com/openstack/neutron/blob/newton-eol/neutron/
|
||||
# api/v2/base.py#L489
|
||||
# but from within the scope of the plugin's transaction, such
|
||||
# that if it fails, everything that happened prior to this in
|
||||
# precommit phase can also be rolled back.
|
||||
resource_name = method_name.replace('_precommit', '').replace(
|
||||
'create_', '').replace('update_', '').replace(
|
||||
'delete_', '')
|
||||
tracked_resource = resource_registry.get_resource(resource_name)
|
||||
tracked_resource._dirty_tenants.add(context.current['tenant_id'])
|
||||
resource_registry.set_resources_dirty(context._plugin_context)
|
||||
|
||||
def _call_on_extended_drivers(self, method_name, context,
|
||||
continue_on_failure=False,
|
||||
raise_db_retriable=False):
|
||||
|
@ -57,181 +57,12 @@ def new_get_admin_context():
|
||||
nlib_ctx.get_admin_context = new_get_admin_context
|
||||
|
||||
|
||||
from neutron.plugins.ml2 import ovo_rpc
|
||||
|
||||
|
||||
# The Neutron code is instrumented to warn whenever
|
||||
# AFTER_CREATE/UPDATE event notification handling is done within a
|
||||
# transaction. To prevent this warning from being triggered when
|
||||
# Neutron API methods are called within a transaction from the
|
||||
# validation tool, we monkey-patch Neutron to not enforce this session
|
||||
# semantic when GUARD_TRANSACTION is set to False on the context.
|
||||
#
|
||||
# REVISIT: Eliminate this monkey-patch when the validation tool no
|
||||
# longer calls Neutron REST API methods inside a transaction.
|
||||
|
||||
orig_is_session_semantic_violated = (
|
||||
ovo_rpc._ObjectChangeHandler._is_session_semantic_violated)
|
||||
|
||||
|
||||
def new_is_session_semantic_violated(self, context, resource, event):
|
||||
if getattr(context, 'GUARD_TRANSACTION', True):
|
||||
return orig_is_session_semantic_violated(
|
||||
self, context, resource, event)
|
||||
|
||||
|
||||
setattr(ovo_rpc._ObjectChangeHandler, '_is_session_semantic_violated',
|
||||
new_is_session_semantic_violated)
|
||||
|
||||
|
||||
from inspect import isclass
|
||||
from inspect import isfunction
|
||||
from inspect import ismethod
|
||||
|
||||
|
||||
# The undecorated() and looks_like_a_decorator() functions have been
|
||||
# borrowed from the undecorated python library since RPM or Debian
|
||||
# packages are not readily available.
|
||||
def looks_like_a_decorator(a):
|
||||
return (
|
||||
isfunction(a) or ismethod(a) or isclass(a)
|
||||
)
|
||||
|
||||
|
||||
def undecorated(o):
|
||||
"""Remove all decorators from a function, method or class"""
|
||||
# class decorator
|
||||
if type(o) is type:
|
||||
return o
|
||||
|
||||
try:
|
||||
# python2
|
||||
closure = o.func_closure
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
try:
|
||||
# python3
|
||||
closure = o.__closure__
|
||||
except AttributeError:
|
||||
return
|
||||
|
||||
if closure:
|
||||
for cell in closure:
|
||||
# avoid infinite recursion
|
||||
if cell.cell_contents is o:
|
||||
continue
|
||||
|
||||
# check if the contents looks like a decorator; in that case
|
||||
# we need to go one level down into the dream, otherwise it
|
||||
# might just be a different closed-over variable, which we
|
||||
# can ignore.
|
||||
|
||||
# Note: this favors supporting decorators defined without
|
||||
# @wraps to the detriment of function/method/class closures
|
||||
if looks_like_a_decorator(cell.cell_contents):
|
||||
undecd = undecorated(cell.cell_contents)
|
||||
if undecd:
|
||||
return undecd
|
||||
else:
|
||||
return o
|
||||
else:
|
||||
return o
|
||||
|
||||
from neutron.db import common_db_mixin as common_db_api
|
||||
from neutron.db.quota import api as quota_api
|
||||
from neutron.db.quota import driver # noqa
|
||||
from neutron.db.quota import models as quota_models
|
||||
from neutron import quota
|
||||
from neutron.quota import resource_registry as res_reg
|
||||
from oslo_config import cfg
|
||||
|
||||
|
||||
f = quota_api.remove_reservation
|
||||
quota_api.commit_reservation = undecorated(f)
|
||||
|
||||
|
||||
def commit_reservation(context, reservation_id):
|
||||
quota_api.commit_reservation(context, reservation_id, set_dirty=False)
|
||||
|
||||
|
||||
quota.QUOTAS.get_driver().commit_reservation = commit_reservation
|
||||
|
||||
|
||||
def patched_set_resources_dirty(context):
|
||||
if not cfg.CONF.QUOTAS.track_quota_usage:
|
||||
return
|
||||
|
||||
with context.session.begin(subtransactions=True):
|
||||
for res in res_reg.get_all_resources().values():
|
||||
if res_reg.is_tracked(res.name) and res.dirty:
|
||||
dirty_tenants_snap = res._dirty_tenants.copy()
|
||||
for tenant_id in dirty_tenants_snap:
|
||||
query = common_db_api.model_query(
|
||||
context, quota_models.QuotaUsage)
|
||||
query = query.filter_by(resource=res.name).filter_by(
|
||||
tenant_id=tenant_id)
|
||||
usage_data = query.first()
|
||||
# Set dirty if not set already. This effectively
|
||||
# patches the inner notify method:
|
||||
# https://github.com/openstack/neutron/blob/newton-eol/
|
||||
# neutron/api/v2/base.py#L481
|
||||
# to avoid updating the QuotaUsages table outside
|
||||
# from that method (which starts a new transaction).
|
||||
# The dirty marking would have been already done
|
||||
# in the ml2plus manager at the end of the pre_commit
|
||||
# stage (and prior to the plugin initiated transaction
|
||||
# completing).
|
||||
if usage_data and not usage_data.dirty:
|
||||
res.mark_dirty(context)
|
||||
|
||||
|
||||
quota.resource_registry.set_resources_dirty = patched_set_resources_dirty
|
||||
|
||||
|
||||
from oslo_db.sqlalchemy import exc_filters
|
||||
|
||||
|
||||
exc_filters.LOG.exception = exc_filters.LOG.debug
|
||||
|
||||
|
||||
from neutron.db import models_v2
|
||||
from neutron.plugins.ml2 import db as ml2_db
|
||||
from neutron.plugins.ml2 import models
|
||||
from oslo_log import log as logging
|
||||
from sqlalchemy.orm import exc
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# REVISIT: This method is patched here to remove calls to
|
||||
# with_lockmode('update') from the two queries it makes. It is no
|
||||
# longer used directly by any code in this repository, was no longer
|
||||
# called by upstream neutron runtime code in pike, and was completely
|
||||
# removed from upstream neutron in queens. The patch remains in case
|
||||
# it is needed in ocata and earlier, but should definitely be
|
||||
# eliminated from pike and later branches.
|
||||
def patched_get_locked_port_and_binding(context, port_id):
|
||||
"""Get port and port binding records for update within transaction."""
|
||||
LOG.debug("Using patched_get_locked_port_and_binding")
|
||||
try:
|
||||
port = (context.session.query(models_v2.Port).
|
||||
enable_eagerloads(False).
|
||||
filter_by(id=port_id).
|
||||
one())
|
||||
binding = (context.session.query(models.PortBinding).
|
||||
enable_eagerloads(False).
|
||||
filter_by(port_id=port_id).
|
||||
one())
|
||||
return port, binding
|
||||
except exc.NoResultFound:
|
||||
return None, None
|
||||
|
||||
|
||||
ml2_db.get_locked_port_and_binding = patched_get_locked_port_and_binding
|
||||
|
||||
|
||||
from neutron.db import db_base_plugin_v2
|
||||
|
||||
|
||||
|
@ -23,10 +23,8 @@ from neutron.common import constants as n_const
from neutron.common import utils as n_utils
from neutron.db import _resource_extend as resource_extend
from neutron.db import _utils as db_utils
from neutron.db import api as db_api
from neutron.db.models import securitygroup as securitygroups_db
from neutron.db import models_v2
from neutron.db import provisioning_blocks
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import managers as ml2_managers
from neutron.plugins.ml2 import plugin as ml2_plugin
@ -44,6 +42,7 @@ from neutron_lib.plugins import directory
from oslo_log import log
from oslo_utils import excutils

from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.db import implicitsubnetpool_db
from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus
from gbpservice.neutron.plugins.ml2plus import driver_context
@ -52,6 +51,7 @@ from gbpservice.neutron.plugins.ml2plus import managers
LOG = log.getLogger(__name__)


@registry.has_registry_receivers
@resource_extend.has_resource_extenders
class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
implicitsubnetpool_db.ImplicitSubnetpoolMixin):
@ -93,42 +93,6 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
|
||||
self.type_manager.initialize()
|
||||
self.extension_manager.initialize()
|
||||
self.mechanism_manager.initialize()
|
||||
registry.subscribe(self._port_provisioned, resources.PORT,
|
||||
provisioning_blocks.PROVISIONING_COMPLETE)
|
||||
registry.subscribe(self._handle_segment_change, resources.SEGMENT,
|
||||
events.PRECOMMIT_CREATE)
|
||||
registry.subscribe(self._handle_segment_change, resources.SEGMENT,
|
||||
events.PRECOMMIT_DELETE)
|
||||
registry.subscribe(self._handle_segment_change, resources.SEGMENT,
|
||||
events.AFTER_CREATE)
|
||||
registry.subscribe(self._handle_segment_change, resources.SEGMENT,
|
||||
events.AFTER_DELETE)
|
||||
|
||||
# REVISIT(kent): All the postcommit calls for SG and SG rules are not
|
||||
# currently implemented as they are not needed at this moment.
|
||||
registry.subscribe(self._handle_security_group_change,
|
||||
resources.SECURITY_GROUP, events.PRECOMMIT_CREATE)
|
||||
registry.subscribe(self._handle_security_group_change,
|
||||
resources.SECURITY_GROUP, events.PRECOMMIT_DELETE)
|
||||
registry.subscribe(self._handle_security_group_change,
|
||||
resources.SECURITY_GROUP, events.PRECOMMIT_UPDATE)
|
||||
|
||||
# There is no update event to the security_group_rule
|
||||
registry.subscribe(self._handle_security_group_rule_change,
|
||||
resources.SECURITY_GROUP_RULE,
|
||||
events.PRECOMMIT_CREATE)
|
||||
registry.subscribe(self._handle_security_group_rule_change,
|
||||
resources.SECURITY_GROUP_RULE,
|
||||
events.PRECOMMIT_DELETE)
|
||||
try:
|
||||
registry.subscribe(self._subnet_delete_precommit_handler,
|
||||
resources.SUBNET, events.PRECOMMIT_DELETE)
|
||||
registry.subscribe(self._subnet_delete_after_delete_handler,
|
||||
resources.SUBNET, events.AFTER_DELETE)
|
||||
except AttributeError:
|
||||
LOG.info("Detected older version of Neutron, ML2Plus plugin "
|
||||
"is not subscribed to subnet_precommit_delete and "
|
||||
"subnet_after_delete events")
|
||||
self._setup_dhcp()
|
||||
self._start_rpc_notifiers()
|
||||
self.add_agent_status_check_worker(self.agent_health_check)
|
||||
@ -140,6 +104,10 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
servers.extend(self.mechanism_manager.start_rpc_listeners())
return servers

# REVISIT: Handle directly in mechanism driver?
@registry.receives(resources.SECURITY_GROUP,
[events.PRECOMMIT_CREATE, events.PRECOMMIT_UPDATE,
events.PRECOMMIT_DELETE])
def _handle_security_group_change(self, resource, event, trigger,
**kwargs):
if 'payload' in kwargs:
@ -170,6 +138,9 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
self.mechanism_manager.update_security_group_precommit(
mech_context)

# REVISIT: Handle directly in mechanism driver?
@registry.receives(resources.SECURITY_GROUP_RULE,
[events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE])
def _handle_security_group_rule_change(self, resource, event, trigger,
**kwargs):
context = kwargs.get('context')
@ -183,7 +154,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
if event == events.PRECOMMIT_DELETE:
sg_rule = {'id': kwargs.get('security_group_rule_id'),
'security_group_id': kwargs.get('security_group_id'),
'tenant_id': context.tenant}
'tenant_id': context.project_id}
mech_context = driver_context.SecurityGroupRuleContext(
self, context, sg_rule)
self.mechanism_manager.delete_security_group_rule_precommit(
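These decorators replace the registry.subscribe() calls that previously ran in initialize(); they only take effect because the class now carries @registry.has_registry_receivers (see the earlier hunk), which registers the decorated methods when the plugin is instantiated. A standalone sketch of the pattern, with an arbitrary resource/event choice:

from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources


@registry.has_registry_receivers
class ExampleReceiver(object):

    @registry.receives(resources.SECURITY_GROUP, [events.PRECOMMIT_CREATE])
    def _sg_precommit_create(self, resource, event, trigger, **kwargs):
        # Equivalent to registry.subscribe(self._sg_precommit_create,
        # resources.SECURITY_GROUP, events.PRECOMMIT_CREATE) in __init__().
        pass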
@ -322,7 +293,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
|
||||
# pre_commit. We need to extend_dict function to pick up the changes
|
||||
# from the pre_commit operations as well.
|
||||
def _create_network_db(self, context, network):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
result, mech_context = super(
|
||||
Ml2PlusPlugin, self)._create_network_db(
|
||||
context, network)
|
||||
@ -371,7 +342,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
|
||||
@db_api.retry_if_session_inactive()
|
||||
def create_subnetpool(self, context, subnetpool):
|
||||
self._ensure_tenant(context, subnetpool[subnetpool_def.RESOURCE_NAME])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
result = super(Ml2PlusPlugin, self).create_subnetpool(context,
|
||||
subnetpool)
|
||||
self._update_implicit_subnetpool(context, subnetpool, result)
|
||||
@ -395,7 +366,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_subnetpool(self, context, id, subnetpool):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
original_subnetpool = super(Ml2PlusPlugin, self).get_subnetpool(
|
||||
context, id)
|
||||
updated_subnetpool = super(Ml2PlusPlugin, self).update_subnetpool(
|
||||
@ -414,7 +385,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
|
||||
|
||||
@n_utils.transaction_guard
|
||||
def delete_subnetpool(self, context, id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
subnetpool = super(Ml2PlusPlugin, self).get_subnetpool(context, id)
|
||||
mech_context = driver_context.SubnetPoolContext(
|
||||
self, context, subnetpool)
|
||||
@ -431,7 +402,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
|
||||
@n_utils.transaction_guard
|
||||
def create_address_scope(self, context, address_scope):
|
||||
self._ensure_tenant(context, address_scope[as_def.ADDRESS_SCOPE])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
result = super(Ml2PlusPlugin, self).create_address_scope(
|
||||
context, address_scope)
|
||||
self.extension_manager.process_create_address_scope(
|
||||
@ -456,7 +427,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
|
||||
|
||||
@n_utils.transaction_guard
|
||||
def update_address_scope(self, context, id, address_scope):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
original_address_scope = super(Ml2PlusPlugin,
|
||||
self).get_address_scope(context, id)
|
||||
updated_address_scope = super(Ml2PlusPlugin,
|
||||
@ -474,7 +445,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
|
||||
|
||||
@n_utils.transaction_guard
|
||||
def delete_address_scope(self, context, id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
address_scope = super(Ml2PlusPlugin, self).get_address_scope(
|
||||
context, id)
|
||||
mech_context = driver_context.AddressScopeContext(
|
||||
@ -540,7 +511,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
|
||||
@db_api.retry_if_session_inactive()
|
||||
def get_networks(self, context, filters=None, fields=None,
|
||||
sorts=None, limit=None, marker=None, page_reverse=False):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
nets_db = super(Ml2PlusPlugin, self)._get_networks(
|
||||
context, filters, None, sorts, limit, marker, page_reverse)
|
||||
|
||||
@ -602,7 +573,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
|
||||
sorts=None, limit=None, marker=None,
|
||||
page_reverse=False):
|
||||
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
marker_obj = self._get_marker_obj(context, 'subnet', limit, marker)
|
||||
|
||||
# REVIST(sridar): We need to rethink if we want to support
|
||||
|
@ -18,7 +18,6 @@ from neutron.common import utils as n_utils
|
||||
from neutron.db import _model_query as model_query
|
||||
from neutron.db import _resource_extend as resource_extend
|
||||
from neutron.db import _utils as db_utils
|
||||
from neutron.db import api as db_api
|
||||
from neutron.db import common_db_mixin
|
||||
from neutron.db import dns_db
|
||||
from neutron.db import extraroute_db
|
||||
@ -27,7 +26,6 @@ from neutron.db.models import l3 as l3_db
|
||||
from neutron.quota import resource_registry
|
||||
from neutron_lib.api.definitions import l3 as l3_def
|
||||
from neutron_lib.api.definitions import portbindings
|
||||
from neutron_lib import constants
|
||||
from neutron_lib import exceptions
|
||||
from neutron_lib.plugins import constants
|
||||
from neutron_lib.plugins import directory
|
||||
@ -36,6 +34,7 @@ from oslo_utils import excutils
|
||||
from sqlalchemy import inspect
|
||||
|
||||
from gbpservice._i18n import _
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.neutron import extensions as extensions_pkg
|
||||
from gbpservice.neutron.extensions import cisco_apic_l3 as l3_ext
|
||||
from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus
|
||||
@ -240,11 +239,13 @@ class ApicL3Plugin(common_db_mixin.CommonDbMixin,
|
||||
# needed, it could me moved to the FLOATING_IP.BEFORE_CREATE
|
||||
# callback in rocky and newer.
|
||||
self._md.ensure_tenant(context, fip['tenant_id'])
|
||||
with db_api.context_manager.reader.using(context):
|
||||
# Verify that subnet is not a SNAT host-pool.
|
||||
#
|
||||
# REVISIT: Replace with FLOATING_IP.PRECOMMIT_CREATE
|
||||
# callback in queens and newer?
|
||||
with db_api.CONTEXT_READER.using(context):
|
||||
# Verify that subnet is not a SNAT host-pool. This could
|
||||
# be done from a FLOATING_IP.PRECOMMIT_CREATE callback,
|
||||
# but that callback is made after a FIP port has been
|
||||
# allocated from the subnet. An exception would cause that
|
||||
# port to be deleted, but we are better off not trying to
|
||||
# allocate from the SNAT subnet in the first place.
|
||||
self._md.check_floatingip_external_address(context, fip)
|
||||
if fip.get('subnet_id') or fip.get('floating_ip_address'):
|
||||
result = super(ApicL3Plugin, self).create_floatingip(
|
||||
@ -252,7 +253,7 @@ class ApicL3Plugin(common_db_mixin.CommonDbMixin,
|
||||
else:
|
||||
# Iterate over non SNAT host-pool subnets and try to
|
||||
# allocate an address.
|
||||
with db_api.context_manager.reader.using(context):
|
||||
with db_api.CONTEXT_READER.using(context):
|
||||
other_subs = self._md.get_subnets_for_fip(context, fip)
|
||||
result = None
|
||||
for ext_sn in other_subs:
|
||||
@ -275,7 +276,7 @@ class ApicL3Plugin(common_db_mixin.CommonDbMixin,
|
||||
# callback, which is called after creation as well, in queens
|
||||
# and newer, or maybe just calling update_floatingip_status
|
||||
# from the MD's create_floatingip method.
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
self.update_floatingip_status(
|
||||
context, result['id'], result['status'])
|
||||
return result
|
||||
@ -293,7 +294,7 @@ class ApicL3Plugin(common_db_mixin.CommonDbMixin,
|
||||
# update_floatingip_status from the MD's update_floatingip
|
||||
# method.
|
||||
if old_fip['status'] != result['status']:
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
self.update_floatingip_status(
|
||||
context, result['id'], result['status'])
|
||||
return result
|
||||
|
@ -766,7 +766,7 @@ class ChainMappingDriver(api.PolicyDriver, local_api.LocalAPI,
config_param_values[key] = servicepolicy_fip_ids
name = 'gbp_%s_%s' % (policy_rule_set['name'], provider_ptg['name'])

attrs = {'tenant_id': p_ctx.tenant,
attrs = {'tenant_id': p_ctx.project_id,
'name': name,
'description': "",
'servicechain_specs': sc_spec,
@ -783,7 +783,7 @@ class ChainMappingDriver(api.PolicyDriver, local_api.LocalAPI,
context._plugin_context.servicechain_instance = sc_instance
self._set_ptg_servicechain_instance_mapping(
session, provider_ptg_id, SCI_CONSUMER_NOT_AVAILABLE,
sc_instance['id'], p_ctx.tenant)
sc_instance['id'], p_ctx.project_id)
return sc_instance

def _set_ptg_servicechain_instance_mapping(self, session, provider_ptg_id,
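Switching from p_ctx.tenant to p_ctx.project_id is one of the renames that silences the 'tenant' DeprecationWarning mentioned in the commit message; on current neutron_lib contexts both names resolve to the same value. If an older context ever has to be tolerated, a fallback along these lines would do (illustrative only):

def _project_of(plugin_context):
    # Prefer the new attribute; fall back to the deprecated name only for
    # contexts that predate the tenant -> project rename.
    return (getattr(plugin_context, 'project_id', None) or
            getattr(plugin_context, 'tenant_id', None))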
@ -20,7 +20,6 @@ from aim import aim_manager
|
||||
from aim.api import resource as aim_resource
|
||||
from aim import context as aim_context
|
||||
from aim import utils as aim_utils
|
||||
from neutron.db import api as db_api
|
||||
from neutron import policy
|
||||
from neutron_lib import constants as n_constants
|
||||
from neutron_lib import context as n_context
|
||||
@ -33,6 +32,7 @@ from oslo_utils import excutils
|
||||
|
||||
from gbpservice._i18n import _
|
||||
from gbpservice.common import utils as gbp_utils
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
|
||||
from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpmdb
|
||||
from gbpservice.neutron.extensions import cisco_apic
|
||||
@ -508,7 +508,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
|
||||
|
||||
self._handle_create_network_service_policy(context)
|
||||
|
||||
with db_api.context_manager.writer.using(context) as session:
|
||||
with db_api.CONTEXT_WRITER.using(context) as session:
|
||||
l2p_db = context._plugin._get_l2_policy(
|
||||
context._plugin_context, context.current['l2_policy_id'])
|
||||
net = self._get_network(
|
||||
@ -2226,7 +2226,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
|
||||
if not context:
|
||||
context = gbp_utils.get_current_context()
|
||||
# get_network can do a DB write, hence we use a writer
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
query = BAKERY(lambda s: s.query(
|
||||
gpmdb.PolicyTargetGroupMapping))
|
||||
query += lambda q: q.filter_by(
|
||||
|
@ -19,11 +19,11 @@ import copy
|
||||
from aim import aim_store
|
||||
from aim.api import resource as aim_resource
|
||||
from aim import context as aim_context
|
||||
from neutron.db import api as db_api
|
||||
from neutron_lib import context
|
||||
from neutron_lib.plugins import directory
|
||||
from oslo_log import log
|
||||
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.neutron.services.grouppolicy import (
|
||||
group_policy_driver_api as api)
|
||||
|
||||
@ -70,7 +70,7 @@ class ValidationManager(object):
|
||||
# REVISIT: Set session's isolation level to serializable?
|
||||
self.actual_context = context.get_admin_context()
|
||||
try:
|
||||
with db_api.context_manager.writer.using(
|
||||
with db_api.CONTEXT_WRITER.using(
|
||||
self.actual_context) as session:
|
||||
self.actual_session = session
|
||||
self.aim_mgr = self.md.aim
|
||||
|
@ -10,10 +10,11 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.db import api as db_api
|
||||
from neutron_lib.db import model_base
|
||||
import sqlalchemy as sa
|
||||
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
|
||||
|
||||
class ServicePolicyPTGIpAddressMapping(model_base.BASEV2):
|
||||
"""Service Policy to IP Address mapping DB."""
|
||||
@ -83,7 +84,7 @@ class NetworkServicePolicyMappingMixin(object):
|
||||
|
||||
def _set_policy_ipaddress_mapping(self, context, service_policy_id,
|
||||
policy_target_group, ipaddress):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
mapping = ServicePolicyPTGIpAddressMapping(
|
||||
service_policy_id=service_policy_id,
|
||||
@ -91,13 +92,13 @@ class NetworkServicePolicyMappingMixin(object):
|
||||
session.add(mapping)
|
||||
|
||||
def _get_ptg_policy_ipaddress_mapping(self, context, policy_target_group):
|
||||
with db_api.context_manager.reader.using(context):
|
||||
with db_api.CONTEXT_READER.using(context):
|
||||
session = context.session
|
||||
return (session.query(ServicePolicyPTGIpAddressMapping).
|
||||
filter_by(policy_target_group=policy_target_group).first())
|
||||
|
||||
def _delete_policy_ipaddress_mapping(self, context, policy_target_group):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
ip_mapping = session.query(
|
||||
ServicePolicyPTGIpAddressMapping).filter_by(
|
||||
@ -107,7 +108,7 @@ class NetworkServicePolicyMappingMixin(object):
|
||||
|
||||
def _set_ptg_policy_fip_mapping(self, context, service_policy_id,
|
||||
policy_target_group_id, fip_id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
mapping = ServicePolicyPTGFipMapping(
|
||||
service_policy_id=service_policy_id,
|
||||
@ -116,14 +117,14 @@ class NetworkServicePolicyMappingMixin(object):
|
||||
session.add(mapping)
|
||||
|
||||
def _get_ptg_policy_fip_mapping(self, context, policy_target_group_id):
|
||||
with db_api.context_manager.reader.using(context):
|
||||
with db_api.CONTEXT_READER.using(context):
|
||||
session = context.session
|
||||
return (session.query(ServicePolicyPTGFipMapping).
|
||||
filter_by(policy_target_group_id=policy_target_group_id).
|
||||
all())
|
||||
|
||||
def _delete_ptg_policy_fip_mapping(self, context, policy_target_group_id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
mappings = session.query(
|
||||
ServicePolicyPTGFipMapping).filter_by(
|
||||
@ -132,7 +133,7 @@ class NetworkServicePolicyMappingMixin(object):
|
||||
session.delete(mapping)
|
||||
|
||||
def _set_pt_floating_ips_mapping(self, context, policy_target_id, fip_ids):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
for fip_id in fip_ids:
|
||||
mapping = PolicyTargetFloatingIPMapping(
|
||||
@ -140,20 +141,20 @@ class NetworkServicePolicyMappingMixin(object):
|
||||
session.add(mapping)
|
||||
|
||||
def _set_pts_floating_ips_mapping(self, context, pt_fip_map):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
for policy_target_id in pt_fip_map:
|
||||
self._set_pt_floating_ips_mapping(
|
||||
context, policy_target_id,
|
||||
pt_fip_map[policy_target_id])
|
||||
|
||||
def _get_pt_floating_ip_mapping(self, context, policy_target_id):
|
||||
with db_api.context_manager.reader.using(context):
|
||||
with db_api.CONTEXT_READER.using(context):
|
||||
session = context.session
|
||||
return (session.query(PolicyTargetFloatingIPMapping).
|
||||
filter_by(policy_target_id=policy_target_id).all())
|
||||
|
||||
def _delete_pt_floating_ip_mapping(self, context, policy_target_id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
fip_mappings = session.query(
|
||||
PolicyTargetFloatingIPMapping).filter_by(
|
||||
@ -162,13 +163,13 @@ class NetworkServicePolicyMappingMixin(object):
|
||||
session.delete(fip_mapping)
|
||||
|
||||
def _get_nsp_qos_mapping(self, context, service_policy_id):
|
||||
with db_api.context_manager.reader.using(context):
|
||||
with db_api.CONTEXT_READER.using(context):
|
||||
session = context.session
|
||||
return (session.query(ServicePolicyQosPolicyMapping).
|
||||
filter_by(service_policy_id=service_policy_id).first())
|
||||
|
||||
def _set_nsp_qos_mapping(self, context, service_policy_id, qos_policy_id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
mapping = ServicePolicyQosPolicyMapping(
|
||||
service_policy_id=service_policy_id,
|
||||
@ -177,6 +178,6 @@ class NetworkServicePolicyMappingMixin(object):
|
||||
|
||||
def _delete_nsp_qos_mapping(self, context, mapping):
|
||||
if mapping:
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
session.delete(mapping)
|
||||
|
@ -16,7 +16,6 @@ import operator
|
||||
from keystoneclient import exceptions as k_exceptions
|
||||
from keystoneclient.v2_0 import client as k_client
|
||||
from neutron.common import exceptions as neutron_exc
|
||||
from neutron.db import api as db_api
|
||||
from neutron.db import models_v2
|
||||
from neutron.extensions import securitygroup as ext_sg
|
||||
from neutron_lib.api.definitions import port as port_def
|
||||
@ -37,6 +36,7 @@ from sqlalchemy.orm import exc as sa_exc
|
||||
from gbpservice._i18n import _
|
||||
from gbpservice.common import utils
|
||||
from gbpservice.network.neutronv2 import local_api
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
|
||||
from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpmdb
|
||||
from gbpservice.neutron.extensions import driver_proxy_group as proxy_ext
|
||||
@ -1496,7 +1496,7 @@ class ImplicitResourceOperations(local_api.LocalAPI,
|
||||
l3p_req[self.L3P_SUBNETPOOLS_KEYS[family]] = [
|
||||
default_pool['id']]
|
||||
|
||||
with db_api.context_manager.writer.using(context._plugin_context):
|
||||
with db_api.CONTEXT_WRITER.using(context._plugin_context):
|
||||
l3p_db = context._plugin._get_l3_policy(
|
||||
context._plugin_context, l3p_req['id'])
|
||||
|
||||
@ -3358,7 +3358,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
|
||||
|
||||
def _delete_ptg_qos_policy(self, context, qos_policy_id):
|
||||
qos_rules = self._get_qos_rules(context._plugin_context, qos_policy_id)
|
||||
with db_api.context_manager.writer.using(context._plugin_context):
|
||||
with db_api.CONTEXT_WRITER.using(context._plugin_context):
|
||||
for qos_rule in qos_rules:
|
||||
self._delete_qos_rule(context._plugin_context,
|
||||
qos_rule['id'], qos_policy_id)
|
||||
|
@ -14,7 +14,6 @@ import netaddr
|
||||
import six
|
||||
|
||||
from neutron.common import utils as n_utils
|
||||
from neutron.db import api as db_api
|
||||
from neutron.quota import resource_registry
|
||||
from neutron_lib.api.definitions import portbindings
|
||||
from neutron_lib import constants
|
||||
@ -26,6 +25,7 @@ from oslo_log import log as logging
|
||||
from oslo_utils import excutils
|
||||
|
||||
from gbpservice.common import utils as gbp_utils
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
|
||||
from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db
|
||||
from gbpservice.neutron import extensions as gbp_extensions
|
||||
@ -363,7 +363,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
new_status = {resource_name: {'status': updated_status,
|
||||
'status_details':
|
||||
updated_status_details}}
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
getattr(super(GroupPolicyPlugin, self),
|
||||
"update_" + resource_name)(
|
||||
context, _resource['id'], new_status)
|
||||
@ -374,7 +374,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
def _get_resource(self, context, resource_name, resource_id,
|
||||
gbp_context_name, fields=None):
|
||||
# The following is a writer because we do DB write for status
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
get_method = "".join(['get_', resource_name])
|
||||
result = getattr(super(GroupPolicyPlugin, self), get_method)(
|
||||
@ -395,7 +395,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
filters=None, fields=None, sorts=None, limit=None,
|
||||
marker=None, page_reverse=False):
|
||||
# The following is a writer because we do DB write for status
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
resource_plural = gbp_utils.get_resource_plural(resource_name)
|
||||
get_resources_method = "".join(['get_', resource_plural])
|
||||
@ -469,7 +469,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def create_policy_target(self, context, policy_target):
|
||||
self._ensure_tenant(context, policy_target['policy_target'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
self._add_fixed_ips_to_port_attributes(policy_target)
|
||||
result = super(GroupPolicyPlugin,
|
||||
@ -499,7 +499,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_policy_target(self, context, policy_target_id, policy_target):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
self._add_fixed_ips_to_port_attributes(policy_target)
|
||||
original_policy_target = self.get_policy_target(context,
|
||||
@ -526,7 +526,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_policy_target(self, context, policy_target_id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
policy_target = self.get_policy_target(context, policy_target_id)
|
||||
policy_context = p_context.PolicyTargetContext(
|
||||
self, context, policy_target)
|
||||
@ -565,7 +565,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
def create_policy_target_group(self, context, policy_target_group):
|
||||
self._ensure_tenant(context,
|
||||
policy_target_group['policy_target_group'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
result = super(GroupPolicyPlugin,
|
||||
self).create_policy_target_group(
|
||||
@ -596,7 +596,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_policy_target_group(self, context, policy_target_group_id,
|
||||
policy_target_group):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
original_policy_target_group = self.get_policy_target_group(
|
||||
context, policy_target_group_id)
|
||||
@ -636,7 +636,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_policy_target_group(self, context, policy_target_group_id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
policy_target_group = self.get_policy_target_group(
|
||||
context, policy_target_group_id)
|
||||
pt_ids = policy_target_group['policy_targets']
|
||||
@ -716,7 +716,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
application_policy_group):
|
||||
self._ensure_tenant(
|
||||
context, application_policy_group['application_policy_group'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
pdm = self.policy_driver_manager
|
||||
result = super(GroupPolicyPlugin,
|
||||
@ -747,7 +747,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
def update_application_policy_group(self, context,
|
||||
application_policy_group_id,
|
||||
application_policy_group):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
pdm = self.policy_driver_manager
|
||||
original_application_policy_group = (
|
||||
@ -780,7 +780,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_application_policy_group(self, context,
|
||||
application_policy_group_id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
pdm = self.policy_driver_manager
|
||||
application_policy_group = self.get_application_policy_group(
|
||||
context, application_policy_group_id)
|
||||
@ -822,7 +822,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def create_l2_policy(self, context, l2_policy):
|
||||
self._ensure_tenant(context, l2_policy['l2_policy'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
result = super(GroupPolicyPlugin,
|
||||
self).create_l2_policy(context, l2_policy)
|
||||
@ -849,7 +849,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_l2_policy(self, context, l2_policy_id, l2_policy):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
original_l2_policy = self.get_l2_policy(context, l2_policy_id)
|
||||
updated_l2_policy = super(GroupPolicyPlugin,
|
||||
@ -874,7 +874,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_l2_policy(self, context, l2_policy_id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
l2_policy = self.get_l2_policy(context, l2_policy_id)
|
||||
policy_context = p_context.L2PolicyContext(self, context,
|
||||
l2_policy)
|
||||
@ -913,7 +913,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
def create_network_service_policy(self, context, network_service_policy):
|
||||
self._ensure_tenant(
|
||||
context, network_service_policy['network_service_policy'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
result = super(GroupPolicyPlugin,
|
||||
self).create_network_service_policy(
|
||||
@ -946,7 +946,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_network_service_policy(self, context, network_service_policy_id,
|
||||
network_service_policy):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
original_network_service_policy = super(
|
||||
GroupPolicyPlugin, self).get_network_service_policy(
|
||||
@ -977,7 +977,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_network_service_policy(
|
||||
self, context, network_service_policy_id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
network_service_policy = self.get_network_service_policy(
|
||||
context, network_service_policy_id)
|
||||
policy_context = p_context.NetworkServicePolicyContext(
|
||||
@ -1018,7 +1018,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def create_l3_policy(self, context, l3_policy):
|
||||
self._ensure_tenant(context, l3_policy['l3_policy'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
result = super(GroupPolicyPlugin,
|
||||
self).create_l3_policy(context, l3_policy)
|
||||
@ -1047,7 +1047,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_l3_policy(self, context, l3_policy_id, l3_policy):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
original_l3_policy = self.get_l3_policy(context, l3_policy_id)
|
||||
updated_l3_policy = super(
|
||||
@ -1073,7 +1073,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_l3_policy(self, context, l3_policy_id, check_unused=False):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
if (check_unused and
|
||||
(session.query(group_policy_mapping_db.L2PolicyMapping).
|
||||
@ -1117,7 +1117,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def create_policy_classifier(self, context, policy_classifier):
|
||||
self._ensure_tenant(context, policy_classifier['policy_classifier'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
result = super(
|
||||
GroupPolicyPlugin, self).create_policy_classifier(
|
||||
@ -1147,7 +1147,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_policy_classifier(self, context, id, policy_classifier):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
original_policy_classifier = super(
|
||||
GroupPolicyPlugin, self).get_policy_classifier(context, id)
|
||||
@ -1173,7 +1173,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_policy_classifier(self, context, id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
policy_classifier = self.get_policy_classifier(context, id)
|
||||
policy_context = p_context.PolicyClassifierContext(
|
||||
self, context, policy_classifier)
|
||||
@ -1212,7 +1212,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def create_policy_action(self, context, policy_action):
|
||||
self._ensure_tenant(context, policy_action['policy_action'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
result = super(GroupPolicyPlugin,
|
||||
self).create_policy_action(context, policy_action)
|
||||
@ -1242,7 +1242,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_policy_action(self, context, id, policy_action):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
original_policy_action = super(
|
||||
GroupPolicyPlugin, self).get_policy_action(context, id)
|
||||
@ -1269,7 +1269,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_policy_action(self, context, id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
policy_action = self.get_policy_action(context, id)
|
||||
policy_context = p_context.PolicyActionContext(self, context,
|
||||
policy_action)
|
||||
@ -1306,7 +1306,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def create_policy_rule(self, context, policy_rule):
|
||||
self._ensure_tenant(context, policy_rule['policy_rule'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
result = super(
|
||||
GroupPolicyPlugin, self).create_policy_rule(
|
||||
@ -1335,7 +1335,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_policy_rule(self, context, id, policy_rule):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
original_policy_rule = super(
|
||||
GroupPolicyPlugin, self).get_policy_rule(context, id)
|
||||
@ -1360,7 +1360,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_policy_rule(self, context, id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
policy_rule = self.get_policy_rule(context, id)
|
||||
policy_context = p_context.PolicyRuleContext(self, context,
|
||||
policy_rule)
|
||||
@ -1398,7 +1398,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def create_policy_rule_set(self, context, policy_rule_set):
|
||||
self._ensure_tenant(context, policy_rule_set['policy_rule_set'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
result = super(GroupPolicyPlugin,
|
||||
self).create_policy_rule_set(
|
||||
@ -1428,7 +1428,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_policy_rule_set(self, context, id, policy_rule_set):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
original_policy_rule_set = super(
|
||||
GroupPolicyPlugin, self).get_policy_rule_set(context, id)
|
||||
@ -1454,7 +1454,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_policy_rule_set(self, context, id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
policy_rule_set = self.get_policy_rule_set(context, id)
|
||||
policy_context = p_context.PolicyRuleSetContext(
|
||||
self, context, policy_rule_set)
|
||||
@ -1491,7 +1491,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def create_external_segment(self, context, external_segment):
|
||||
self._ensure_tenant(context, external_segment['external_segment'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
result = super(GroupPolicyPlugin,
|
||||
self).create_external_segment(context,
|
||||
@ -1525,7 +1525,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_external_segment(self, context, external_segment_id,
|
||||
external_segment):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
original_external_segment = super(
|
||||
GroupPolicyPlugin, self).get_external_segment(
|
||||
@ -1556,7 +1556,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_external_segment(self, context, external_segment_id):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
es = self.get_external_segment(context, external_segment_id)
|
||||
if es['l3_policies'] or es['nat_pools'] or es['external_policies']:
|
||||
raise gpex.ExternalSegmentInUse(es_id=es['id'])
|
||||
@ -1598,7 +1598,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def create_external_policy(self, context, external_policy):
|
||||
self._ensure_tenant(context, external_policy['external_policy'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
result = super(GroupPolicyPlugin,
|
||||
self).create_external_policy(
|
||||
@ -1629,7 +1629,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_external_policy(self, context, external_policy_id,
|
||||
external_policy):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
original_external_policy = super(
|
||||
GroupPolicyPlugin, self).get_external_policy(
|
||||
@ -1658,7 +1658,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_external_policy(self, context, external_policy_id,
|
||||
check_unused=False):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
es = self.get_external_policy(context, external_policy_id)
|
||||
policy_context = p_context.ExternalPolicyContext(
|
||||
self, context, es)
|
||||
@ -1696,7 +1696,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@db_api.retry_if_session_inactive()
|
||||
def create_nat_pool(self, context, nat_pool):
|
||||
self._ensure_tenant(context, nat_pool['nat_pool'])
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
result = super(GroupPolicyPlugin, self).create_nat_pool(
|
||||
context, nat_pool)
|
||||
@ -1723,7 +1723,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_nat_pool(self, context, nat_pool_id, nat_pool):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
session = context.session
|
||||
original_nat_pool = super(
|
||||
GroupPolicyPlugin, self).get_nat_pool(context, nat_pool_id)
|
||||
@ -1746,7 +1746,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
|
||||
@n_utils.transaction_guard
|
||||
@db_api.retry_if_session_inactive()
|
||||
def delete_nat_pool(self, context, nat_pool_id, check_unused=False):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
es = self.get_nat_pool(context, nat_pool_id)
|
||||
policy_context = p_context.NatPoolContext(self, context, es)
|
||||
(self.policy_driver_manager.delete_nat_pool_precommit(
|
||||
|
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.

- from neutron.db import api as db_api
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_db import exception as oslo_db_excp
@@ -20,6 +19,7 @@ from oslo_utils import excutils
from sqlalchemy import exc as sqlalchemy_exc
import stevedore

+ from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
from gbpservice.neutron.services.grouppolicy.common import exceptions as gp_exc

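In this file and several below, the neutron.db.api import is dropped in favour of gbpservice.neutron.db.api while the db_api alias is kept, so the calling code stays unchanged. A hedged sketch of what such a compatibility shim can look like; the actual contents of that module are not part of this diff, so everything below is an assumption:

try:
    # Newer branches expose the enginefacade transaction context managers
    # directly from neutron_lib.
    from neutron_lib.db.api import CONTEXT_READER
    from neutron_lib.db.api import CONTEXT_WRITER
except ImportError:
    # Assumed fallback for older branches, where the context managers
    # still hang off neutron.db.api's context_manager object.
    from neutron.db.api import context_manager
    CONTEXT_READER = context_manager.reader
    CONTEXT_WRITER = context_manager.writer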
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.

- from neutron_lib import constants
+ from neutron_lib.plugins import constants
from neutron_lib.plugins import directory

from gbpservice.common import utils

@@ -13,7 +13,6 @@
import time

from heatclient import exc as heat_exc
- from neutron.db import api as db_api
from neutron.db import models_v2 as ndb
from neutron_lib.db import model_base
from neutron_lib.plugins import constants as pconst
@@ -25,6 +24,7 @@ from oslo_serialization import jsonutils
import sqlalchemy as sa

from gbpservice._i18n import _
+ from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.services.servicechain.plugins.ncp import (
exceptions as exc)
from gbpservice.neutron.services.servicechain.plugins.ncp import driver_base

@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.

- from neutron.db import api as db_api
from neutron.quota import resource_registry
from neutron_lib.plugins import constants as pconst
from oslo_config import cfg
@@ -19,6 +18,7 @@ from oslo_log import log as logging
from oslo_utils import excutils

from gbpservice.common import utils
+ from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.db import servicechain_db
from gbpservice.neutron.services.grouppolicy.common import constants as gp_cts
from gbpservice.neutron.services.grouppolicy.common import utils as gutils
@ -78,7 +78,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
|
||||
|
||||
deployers = {}
|
||||
# REVISIT: Consider adding ensure_tenant() call here
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
instance = super(NodeCompositionPlugin,
|
||||
self).create_servicechain_instance(
|
||||
context, servicechain_instance)
|
||||
@ -154,7 +154,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
|
||||
deployers = {}
|
||||
updaters = {}
|
||||
destroyers = {}
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
original_instance = self.get_servicechain_instance(
|
||||
context, servicechain_instance_id)
|
||||
updated_instance = super(
|
||||
@ -191,21 +191,21 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
|
||||
When a Servicechain Instance is deleted, all its nodes need to be
|
||||
destroyed.
|
||||
"""
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
instance = self.get_servicechain_instance(context,
|
||||
servicechain_instance_id)
|
||||
destroyers = self._get_scheduled_drivers(context, instance,
|
||||
'destroy')
|
||||
self._destroy_servicechain_nodes(context, destroyers)
|
||||
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
super(NodeCompositionPlugin, self).delete_servicechain_instance(
|
||||
context, servicechain_instance_id)
|
||||
|
||||
@log.log_method_call
|
||||
def create_servicechain_node(self, context, servicechain_node):
|
||||
# REVISIT: Consider adding ensure_tenant() call here
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
result = super(NodeCompositionPlugin,
|
||||
self).create_servicechain_node(context,
|
||||
servicechain_node)
|
||||
@ -222,7 +222,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
|
||||
reconfiguration.
|
||||
"""
|
||||
updaters = {}
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
original_sc_node = self.get_servicechain_node(
|
||||
context, servicechain_node_id)
|
||||
updated_sc_node = super(NodeCompositionPlugin,
|
||||
@ -265,7 +265,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
|
||||
@log.log_method_call
|
||||
def create_servicechain_spec(self, context, servicechain_spec):
|
||||
# REVISIT: Consider adding ensure_tenant() call here
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
result = super(
|
||||
NodeCompositionPlugin, self).create_servicechain_spec(
|
||||
context, servicechain_spec, set_params=False)
|
||||
@ -275,7 +275,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
|
||||
@log.log_method_call
|
||||
def update_servicechain_spec(self, context, servicechain_spec_id,
|
||||
servicechain_spec):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
original_sc_spec = self.get_servicechain_spec(
|
||||
context, servicechain_spec_id)
|
||||
updated_sc_spec = super(NodeCompositionPlugin,
|
||||
@ -301,7 +301,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
|
||||
@log.log_method_call
|
||||
def create_service_profile(self, context, service_profile):
|
||||
# REVISIT: Consider adding ensure_tenant() call here
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
result = super(
|
||||
NodeCompositionPlugin, self).create_service_profile(
|
||||
context, service_profile)
|
||||
@ -311,7 +311,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
|
||||
@log.log_method_call
|
||||
def update_service_profile(self, context, service_profile_id,
|
||||
service_profile):
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
original_profile = self.get_service_profile(
|
||||
context, service_profile_id)
|
||||
updated_profile = super(NodeCompositionPlugin,
|
||||
@ -481,7 +481,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
|
||||
|
||||
def _get_resource(self, context, resource_name, resource_id, fields=None):
|
||||
deployers = {}
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
resource = getattr(super(NodeCompositionPlugin,
|
||||
self), 'get_' + resource_name)(context, resource_id)
|
||||
if resource_name == 'servicechain_instance':
|
||||
|
@@ -48,12 +48,11 @@ class FlowclassifierAIMDriverBase(base.FlowClassifierDriverBase):
pass


+ @registry.has_registry_receivers
class FlowclassifierAIMDriver(FlowclassifierAIMDriverBase):
"""SFC Driver mapping for AIM."""

def initialize(self):
- registry.subscribe(self._handle_network_delete, resources.NETWORK,
- events.PRECOMMIT_DELETE)
self._core_plugin = None

@property
@@ -135,6 +134,7 @@ class FlowclassifierAIMDriver(FlowclassifierAIMDriverBase):

return classifier_ids

+ @registry.receives(resources.NETWORK, [events.PRECOMMIT_DELETE])
def _handle_network_delete(self, rtype, event, trigger, context,
network_id, **kwargs):
flc_ids = self._get_classifiers_by_network_id(context, network_id)

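The change above (and the matching one in the SFC driver below) moves callback registration from explicit registry.subscribe() calls in initialize() to the declarative @registry.receives decorator together with @registry.has_registry_receivers on the class. A small self-contained illustration of the decorator style, using a toy class rather than project code:

from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources


@registry.has_registry_receivers
class NetworkWatcher(object):
    """Toy example: receivers are wired up when the class is instantiated."""

    @registry.receives(resources.NETWORK, [events.PRECOMMIT_DELETE])
    def _handle_network_delete(self, rtype, event, trigger, **kwargs):
        # Invoked by the callback registry just before a network row is
        # deleted; kwargs carry the event payload.
        pass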
@@ -24,17 +24,18 @@ from networking_sfc.extensions import flowclassifier as flowc_ext
from networking_sfc.extensions import sfc as sfc_ext
from networking_sfc.services.sfc.common import context as sfc_ctx
from networking_sfc.services.sfc.drivers import base
- from neutron.db import api as db_api
from neutron.db import models_v2
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib import constants as n_constants
+ from neutron_lib.plugins import constants as pconst
from neutron_lib.plugins import directory
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy.ext import baked
from sqlalchemy import or_

+ from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import apic_mapper
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import constants
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
@@ -86,6 +87,7 @@ class SfcAIMDriverBase(base.SfcDriverBase):
pass


+ @registry.has_registry_receivers
class SfcAIMDriver(SfcAIMDriverBase):
"""SFC Driver mapping for AIM."""

@@ -99,19 +101,6 @@ class SfcAIMDriver(SfcAIMDriverBase):
self._aim_flowc_driver = None
self.name_mapper = apic_mapper.APICNameMapper()
self.aim = aim_manager.AimManager()
- # We don't care about deletion, that is managed by the database layer
- # (can't delete a flowclassifier if in use).
- for event in [events.PRECOMMIT_UPDATE, events.PRECOMMIT_CREATE]:
- registry.subscribe(self._handle_flow_classifier,
- constants.GBP_FLOW_CLASSIFIER, event)
- registry.subscribe(self._handle_port_bound, constants.GBP_PORT,
- events.PRECOMMIT_UPDATE)
- registry.subscribe(self._handle_net_gbp_change,
- constants.GBP_NETWORK_EPG, events.PRECOMMIT_UPDATE)
- registry.subscribe(self._handle_net_gbp_change,
- constants.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE)
- registry.subscribe(self._handle_net_link_change,
- constants.GBP_NETWORK_LINK, events.PRECOMMIT_UPDATE)

@property
def plugin(self):
@@ -135,7 +124,7 @@ class SfcAIMDriver(SfcAIMDriverBase):
@property
def l3_plugin(self):
if not self._l3_plugin:
- self._l3_plugin = directory.get_plugin(n_constants.L3)
+ self._l3_plugin = directory.get_plugin(pconst.L3)
if not self._l3_plugin:
LOG.error("No L3 service plugin found.")
raise exc.GroupPolicyDeploymentError()
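The l3_plugin lookup above now uses the service-plugin alias from neutron_lib.plugins.constants. Standalone illustration of that lookup, which simply returns None when no L3 service plugin is loaded:

from neutron_lib.plugins import constants as pconst
from neutron_lib.plugins import directory

l3_plugin = directory.get_plugin(pconst.L3)
if l3_plugin is None:
    print("No L3 service plugin loaded in this process")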
@ -676,7 +665,7 @@ class SfcAIMDriver(SfcAIMDriverBase):
|
||||
|
||||
def _get_chains_by_classifier_id(self, plugin_context, flowc_id):
|
||||
context = plugin_context
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
query = BAKERY(lambda s: s.query(
|
||||
sfc_db.ChainClassifierAssoc))
|
||||
query += lambda q: q.filter_by(
|
||||
@ -691,7 +680,7 @@ class SfcAIMDriver(SfcAIMDriverBase):
|
||||
if not ppg_ids:
|
||||
return []
|
||||
context = plugin_context
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
query = BAKERY(lambda s: s.query(
|
||||
sfc_db.ChainGroupAssoc))
|
||||
query += lambda q: q.filter(
|
||||
@ -706,7 +695,7 @@ class SfcAIMDriver(SfcAIMDriverBase):
|
||||
def _get_groups_by_pair_id(self, plugin_context, pp_id):
|
||||
# NOTE(ivar): today, port pair can be associated only to one PPG
|
||||
context = plugin_context
|
||||
with db_api.context_manager.writer.using(context):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
pp_db = self.sfc_plugin._get_port_pair(plugin_context, pp_id)
|
||||
if pp_db and pp_db.portpairgroup_id:
|
||||
return self.sfc_plugin.get_port_pair_groups(
|
||||
@@ -867,6 +856,10 @@ class SfcAIMDriver(SfcAIMDriverBase):
# Every port pair will return the same result
return ingress_vrf, egress_vrf

+ # We don't care about deletion, that is managed by the database
+ # layer (can't delete a flowclassifier if in use).
+ @registry.receives(constants.GBP_FLOW_CLASSIFIER,
+ [events.PRECOMMIT_CREATE, events.PRECOMMIT_UPDATE])
def _handle_flow_classifier(self, rtype, event, trigger, driver_context,
**kwargs):
if event == events.PRECOMMIT_UPDATE:
@@ -879,6 +872,7 @@ class SfcAIMDriver(SfcAIMDriverBase):
chain, chain)
self.update_port_chain_precommit(c_ctx, remap=True)

+ @registry.receives(constants.GBP_PORT, [events.PRECOMMIT_UPDATE])
def _handle_port_bound(self, rtype, event, trigger, driver_context,
**kwargs):
if event == events.PRECOMMIT_UPDATE:
@@ -914,6 +908,8 @@
pp, pp)
self.update_port_pair_precommit(d_ctx, remap=True)

+ @registry.receives(constants.GBP_NETWORK_EPG, [events.PRECOMMIT_UPDATE])
+ @registry.receives(constants.GBP_NETWORK_VRF, [events.PRECOMMIT_UPDATE])
def _handle_net_gbp_change(self, rtype, event, trigger, context,
network_id, **kwargs):
chains = {}
@@ -933,6 +929,7 @@
flowcs, ppgs = self._get_pc_flowcs_and_ppgs(context, chain)
self._validate_port_chain(context, chain, flowcs, ppgs)

+ @registry.receives(constants.GBP_NETWORK_LINK, [events.PRECOMMIT_UPDATE])
def _handle_net_link_change(self, rtype, event, trigger, context,
networks_map, host_links, host, **kwargs):
aim_ctx = aim_context.AimContext(db_session=context.session)

@@ -23,7 +23,6 @@ from neutron import policy
from neutron.services.trunk.rpc import server as trunk_server
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2
- from neutron_lib import constants as nl_constants
from neutron_lib import context
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
@@ -404,7 +403,7 @@ class GroupPolicyDbTestCase(GroupPolicyDBTestBase,
plugins = directory.get_plugins()
self._gbp_plugin = plugins.get(constants.GROUP_POLICY)
self._sc_plugin = plugins.get(constants.SERVICECHAIN)
- self._l3_plugin = plugins.get(nl_constants.L3)
+ self._l3_plugin = plugins.get(constants.L3)
self._set_notification_mocks()
# The following is done to stop the neutron code from checking
# for dhcp agents

@@ -14,10 +14,10 @@
import copy
import fixtures
import mock
- from neutron.db import api as db_api
from neutron.tests import base
from neutron_lib import context

+ from gbpservice.neutron.db import api as db_api
from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import exceptions as nfp_exc
from gbpservice.nfp.orchestrator.db import nfp_db
@@ -33,7 +33,7 @@ class SqlFixture(fixtures.Fixture):

def _setUp(self):
# Register all data models
- engine = db_api.context_manager.writer.get_engine()
+ engine = db_api.CONTEXT_WRITER.get_engine()
if not SqlFixture._TABLES_ESTABLISHED:
nfp_db_model.BASE.metadata.create_all(engine)
SqlFixture._TABLES_ESTABLISHED = True

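The fixture above fetches the SQLAlchemy engine from the writer context manager instead of the old context_manager object. A hedged sketch of that fixture pattern; it assumes an enginefacade already configured against a test database (for example in-memory SQLite), and the model-base argument is hypothetical:

from neutron_lib.db import api as db_api


def create_test_tables(model_base):
    # Ask the writer side of the enginefacade for its engine and create
    # all tables registered on the given declarative base against it.
    engine = db_api.CONTEXT_WRITER.get_engine()
    model_base.metadata.create_all(engine)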
@ -37,7 +37,6 @@ from aim import utils as aim_utils
|
||||
|
||||
from keystoneclient.v3 import client as ksc_client
|
||||
from neutron.api import extensions
|
||||
from neutron.db import api as db_api
|
||||
from neutron.db import provisioning_blocks
|
||||
from neutron.db import segments_db
|
||||
from neutron.plugins.ml2 import driver_context
|
||||
@ -52,12 +51,14 @@ from neutron.tests.unit import testlib_api
|
||||
from neutron_lib.api.definitions import portbindings
|
||||
from neutron_lib import constants as n_constants
|
||||
from neutron_lib import context as n_context
|
||||
from neutron_lib.plugins import constants as pconst
|
||||
from neutron_lib.plugins import directory
|
||||
from opflexagent import constants as ofcst
|
||||
from oslo_config import cfg
|
||||
import webob.exc
|
||||
|
||||
from gbpservice.neutron.db import all_models # noqa
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.neutron.extensions import cisco_apic_l3 as l3_ext
|
||||
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import ( # noqa
|
||||
config as aimcfg)
|
||||
@ -208,7 +209,7 @@ class AimSqlFixture(fixtures.Fixture):
|
||||
self.useFixture(testlib_api.StaticSqlFixture())
|
||||
|
||||
# Register all data models.
|
||||
engine = db_api.context_manager.writer.get_engine()
|
||||
engine = db_api.CONTEXT_WRITER.get_engine()
|
||||
if not AimSqlFixture._AIM_TABLES_ESTABLISHED:
|
||||
aim_model_base.Base.metadata.create_all(engine)
|
||||
AimSqlFixture._AIM_TABLES_ESTABLISHED = True
|
||||
@ -338,7 +339,7 @@ class ApicAimTestCase(test_address_scope.AddressScopeTestCase,
|
||||
self.plugin = directory.get_plugin()
|
||||
self.driver = self.plugin.mechanism_manager.mech_drivers[
|
||||
'apic_aim'].obj
|
||||
self.l3_plugin = directory.get_plugin(n_constants.L3)
|
||||
self.l3_plugin = directory.get_plugin(pconst.L3)
|
||||
self.aim_mgr = aim_manager.AimManager()
|
||||
self._app_profile_name = self.driver.ap_name
|
||||
self.extension_attributes = ('router:external', DN,
|
||||
@ -568,9 +569,12 @@ class ApicAimTestCase(test_address_scope.AddressScopeTestCase,
|
||||
|
||||
def _check_binding(self, port_id, expected_binding_info=None,
|
||||
top_bound_physnet=None, bottom_bound_physnet=None):
|
||||
port_context = self.plugin.get_bound_port_context(
|
||||
port_context = self.driver.make_port_context(
|
||||
n_context.get_admin_context(), port_id)
|
||||
self.assertIsNotNone(port_context)
|
||||
self.assertNotIn(port_context.vif_type,
|
||||
[portbindings.VIF_TYPE_UNBOUND,
|
||||
portbindings.VIF_TYPE_BINDING_FAILED])
|
||||
binding_info = [(bl['bound_driver'],
|
||||
bl['bound_segment']['network_type'])
|
||||
for bl in port_context.binding_levels]
|
||||
@ -622,16 +626,8 @@ class ApicAimTestCase(test_address_scope.AddressScopeTestCase,
|
||||
return self.deserialize(self.fmt, req.get_response(self.api))
|
||||
|
||||
def _net_2_epg(self, network):
if network['router:external']:
epg = aim_resource.EndpointGroup.from_dn(
return aim_resource.EndpointGroup.from_dn(
network['apic:distinguished_names']['EndpointGroup'])
else:
epg = aim_resource.EndpointGroup(
tenant_name=self.name_mapper.project(
None, network['tenant_id']),
app_profile_name=self._app_profile_name,
name=self.name_mapper.network(None, network['id']))
return epg
|
||||
|
||||
|
||||
class TestRpcListeners(ApicAimTestCase):
|
||||
@ -4903,7 +4899,7 @@ class TestPortBinding(ApicAimTestCase):
|
||||
# Bind to non-opflex host
|
||||
p1 = self._bind_port_to_host(p1['id'], 'host1')['port']
|
||||
self.assertNotEqual('binding_failed', p1['binding:vif_type'])
|
||||
p1_ctx = self.plugin.get_bound_port_context(
|
||||
p1_ctx = self.driver.make_port_context(
|
||||
n_context.get_admin_context(), p1['id'])
|
||||
self.assertEqual('opflex', p1_ctx.top_bound_segment['network_type'])
|
||||
self.assertEqual('vlan', p1_ctx.bottom_bound_segment['network_type'])
|
||||
@ -4912,7 +4908,7 @@ class TestPortBinding(ApicAimTestCase):
|
||||
self._register_agent('host2', AGENT_CONF_OPFLEX)
|
||||
p2 = self._bind_port_to_host(p2['id'], 'host2')['port']
|
||||
self.assertNotEqual('binding_failed', p2['binding:vif_type'])
|
||||
p2_ctx = self.plugin.get_bound_port_context(
|
||||
p2_ctx = self.driver.make_port_context(
|
||||
n_context.get_admin_context(), p2['id'])
|
||||
self.assertEqual('opflex', p2_ctx.top_bound_segment['network_type'])
|
||||
self.assertEqual('vlan', p2_ctx.bottom_bound_segment['network_type'])
|
||||
|
@@ -18,12 +18,12 @@ import testtools

from neutron.api import extensions
from neutron.common import rpc as n_rpc
- from neutron.common import topics
from neutron.conf.plugins.ml2 import config # noqa
from neutron.conf.plugins.ml2.drivers import driver_type
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit.extensions import test_address_scope
+ from neutron_lib.agent import topics
from neutron_lib.plugins import directory
from oslo_config import cfg

@@ -75,7 +75,7 @@ class TestRpcListeners(Ml2PlusPluginV2TestCase):

@staticmethod
def _start_rpc_listeners(self):
- conn = n_rpc.create_connection()
+ conn = n_rpc.Connection()
conn.create_consumer('q-test-topic', [])
return conn.consume_in_threads()
|
||||
|
@ -23,21 +23,22 @@ from aim import context as aim_context
|
||||
from keystoneclient.v3 import client as ksc_client
|
||||
from netaddr import IPSet
|
||||
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
|
||||
from neutron.common import utils as n_utils
|
||||
from neutron.db import api as db_api
|
||||
from neutron.db.models import securitygroup as sg_models
|
||||
from neutron.db.port_security import models as psec_models
|
||||
from neutron.extensions import dns
|
||||
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
|
||||
from neutron.tests.unit.extensions import test_address_scope
|
||||
from neutron.tests.unit.extensions import test_securitygroup
|
||||
from neutron_lib.api import extensions
|
||||
from neutron_lib import constants as n_constants
|
||||
from neutron_lib import context as nctx
|
||||
from neutron_lib.plugins import constants as pconst
|
||||
from neutron_lib.plugins import directory
|
||||
from opflexagent import constants as ocst
|
||||
from oslo_config import cfg
|
||||
import webob.exc
|
||||
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db
|
||||
from gbpservice.neutron.extensions import cisco_apic
|
||||
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
|
||||
@ -159,7 +160,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
|
||||
trunk_plugin=trunk_plugin)
|
||||
self.db_session = db_api.get_writer_session()
|
||||
self.initialize_db_config(self.db_session)
|
||||
self.l3_plugin = directory.get_plugin(n_constants.L3)
|
||||
self.l3_plugin = directory.get_plugin(pconst.L3)
|
||||
config.cfg.CONF.set_override('network_vlan_ranges',
|
||||
['physnet1:1000:1099'],
|
||||
group='ml2_type_vlan')
|
||||
@@ -301,11 +302,11 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,

def _switch_to_tenant1(self):
self._tenant_id = self.first_tenant_id
- self._neutron_context.tenant = self._tenant_id
+ self._neutron_context.project_id = self._tenant_id

def _switch_to_tenant2(self):
self._tenant_id = 'test_tenant-2'
- self._neutron_context.tenant = self._tenant_id
+ self._neutron_context.project_id = self._tenant_id

def _show_subnet(self, id):
req = self.new_show_request('subnets', id, fmt=self.fmt)
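These test helpers now set project_id on the request context rather than the deprecated tenant alias. Standalone illustration of the preferred attribute (toy snippet, not project code):

from neutron_lib import context

ctx = context.get_admin_context()
# project_id is the non-deprecated spelling; the tenant/tenant_id aliases
# may emit a DeprecationWarning on newer branches.
ctx.project_id = 'project-1'
assert ctx.project_id == 'project-1'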
@ -563,7 +564,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
|
||||
self._extend_subnet_prefixes(prefixes_list, l3p, version)
|
||||
self.assertItemsEqual(subnetpool_prefixes, prefixes_list)
|
||||
|
||||
params = {'ids': ascp_ids}
|
||||
params = '&'.join('id=' + id for id in ascp_ids)
|
||||
req = self.new_list_request('address-scopes',
|
||||
params=params, fmt=self.fmt)
|
||||
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
|
||||
@ -626,7 +627,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
|
||||
self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
|
||||
else:
|
||||
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
|
||||
params = {'ids': ascp_ids}
|
||||
params = '&'.join('id=' + id for id in ascp_ids)
|
||||
req = self.new_list_request('address-scopes',
|
||||
params=params, fmt=self.fmt)
|
||||
res = req.get_response(self.ext_api)
|
||||
@ -3477,7 +3478,7 @@ class TestPolicyTarget(AIMBaseTestCase,
|
||||
self._update('subnets', ext_net2_sub2['id'],
|
||||
{'subnet': {SNAT_HOST_POOL: True}})
|
||||
self.assertTrue(
|
||||
n_utils.is_extension_supported(self._l3_plugin,
|
||||
extensions.is_extension_supported(self._l3_plugin,
|
||||
dns.Dns.get_alias()))
|
||||
network = self._make_network(self.fmt, 'net1', True,
|
||||
arg_list=('dns_domain',),
|
||||
|
@ -25,7 +25,6 @@ from neutron.tests.unit.extensions import test_securitygroup
|
||||
from neutron_lib import constants as n_constants
|
||||
from neutron_lib import context as n_context
|
||||
from oslo_config import cfg
|
||||
import webob.exc
|
||||
|
||||
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
|
||||
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
|
||||
@ -1007,125 +1006,6 @@ class TestNeutronMapping(AimValidationTestCase):
|
||||
port_id=port['id']).update({'host': 'yyy'})
|
||||
self._validate_fails_binding_ports()
|
||||
|
||||
def test_legacy_cleanup(self):
|
||||
# Create pre-existing AIM VRF.
|
||||
vrf = aim_resource.VRF(tenant_name='common', name='v1', monitored=True)
|
||||
self.aim_mgr.create(self.aim_ctx, vrf)
|
||||
|
||||
# Create pre-existing AIM L3Outside.
|
||||
l3out = aim_resource.L3Outside(
|
||||
tenant_name='common', name='l1', vrf_name='v1', monitored=True)
|
||||
self.aim_mgr.create(self.aim_ctx, l3out)
|
||||
|
||||
# Create pre-existing AIM ExternalNetwork.
|
||||
ext_net = aim_resource.ExternalNetwork(
|
||||
tenant_name='common', l3out_name='l1', name='n1', monitored=True)
|
||||
self.aim_mgr.create(self.aim_ctx, ext_net)
|
||||
|
||||
# Create external network.
|
||||
kwargs = {'router:external': True,
|
||||
'apic:distinguished_names':
|
||||
{'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
|
||||
ext_net = self._make_network(
|
||||
self.fmt, 'ext_net', True, tenant_id='tenant_1',
|
||||
arg_list=self.extension_attributes, **kwargs)['network']
|
||||
ext_net_id = ext_net['id']
|
||||
|
||||
# Create router using external network.
|
||||
kwargs = {'external_gateway_info': {'network_id': ext_net_id}}
|
||||
router = self._make_router(
|
||||
self.fmt, self._tenant_id, 'router1',
|
||||
arg_list=self.extension_attributes, **kwargs)['router']
|
||||
router_id = router['id']
|
||||
|
||||
# Create internal network and subnet.
|
||||
int_net_resp = self._make_network(self.fmt, 'net1', True)
|
||||
int_net = int_net_resp['network']
|
||||
int_net_id = int_net['id']
|
||||
int_subnet = self._make_subnet(
|
||||
self.fmt, int_net_resp, '10.0.1.1', '10.0.1.0/24')['subnet']
|
||||
int_subnet_id = int_subnet['id']
|
||||
|
||||
# Add internal subnet to router.
|
||||
self.l3_plugin.add_router_interface(
|
||||
n_context.get_admin_context(), router_id,
|
||||
{'subnet_id': int_subnet_id})
|
||||
|
||||
# Validate just to make sure everything is OK before creating
|
||||
# legacy plugin's SNAT-related resources.
|
||||
self._validate()
|
||||
|
||||
# Create legacy plugin's SNAT-related Neutron network.
|
||||
leg_net_resp = self._make_network(
|
||||
self.fmt,
|
||||
'host-snat-network-for-internal-use-' + ext_net_id, False)
|
||||
leg_net = leg_net_resp['network']
|
||||
leg_net_id = leg_net['id']
|
||||
|
||||
# Create legacy plugin's SNAT-related Neutron subnet.
|
||||
leg_subnet = self._make_subnet(
|
||||
self.fmt, leg_net_resp, '66.66.66.1', '66.66.66.0/24',
|
||||
enable_dhcp=False)['subnet']
|
||||
leg_subnet_id = leg_subnet['id']
|
||||
data = {'subnet': {'name': 'host-snat-pool-for-internal-use'}}
|
||||
leg_subnet = self._update('subnets', leg_subnet_id, data)['subnet']
|
||||
|
||||
# Create legacy plugin's SNAT-related Neutron port.
|
||||
fixed_ips = [{'subnet_id': leg_subnet_id, 'ip_address': '66.66.66.5'}]
|
||||
leg_port = self._make_port(
|
||||
self.fmt, leg_net_id, fixed_ips=fixed_ips,
|
||||
name='host-snat-pool-port-for-internal-use',
|
||||
device_owner='host-snat-pool-port-device-owner-internal-use'
|
||||
)['port']
|
||||
leg_port_id = leg_port['id']
|
||||
|
||||
# Delete all networks' mapping and extension records to
|
||||
# simulate migration use case.
|
||||
net_ids = [ext_net_id, int_net_id, leg_net_id]
|
||||
(self.db_session.query(db.NetworkMapping).
|
||||
filter(db.NetworkMapping.network_id.in_(net_ids)).
|
||||
delete(synchronize_session=False))
|
||||
(self.db_session.query(ext_db.NetworkExtensionDb).
|
||||
filter(ext_db.NetworkExtensionDb.network_id.in_(net_ids)).
|
||||
delete(synchronize_session=False))
|
||||
|
||||
# Delete all subnets' extension records to simulate migration
|
||||
# use case.
|
||||
subnet_ids = [int_subnet_id, leg_subnet_id]
|
||||
(self.db_session.query(ext_db.SubnetExtensionDb).
|
||||
filter(ext_db.SubnetExtensionDb.subnet_id.in_(subnet_ids)).
|
||||
delete(synchronize_session=False))
|
||||
|
||||
# Test migration.
|
||||
cfg.CONF.set_override(
|
||||
'migrate_ext_net_dns',
|
||||
{ext_net_id: 'uni/tn-common/out-l1/instP-n1'},
|
||||
group='ml2_apic_aim')
|
||||
self._validate_repair_validate()
|
||||
|
||||
# Ensure legacy plugin's SNAT-related resources are gone.
|
||||
self._show(
|
||||
'ports', leg_port_id,
|
||||
expected_code=webob.exc.HTTPNotFound.code)
|
||||
self._show(
|
||||
'subnets', leg_subnet_id,
|
||||
expected_code=webob.exc.HTTPNotFound.code)
|
||||
self._show(
|
||||
'networks', leg_net_id,
|
||||
expected_code=webob.exc.HTTPNotFound.code)
|
||||
|
||||
# Ensure new SNAT subnet was properly created on actual
|
||||
# external network.
|
||||
ext_subnets = self._show('networks', ext_net_id)['network']['subnets']
|
||||
self.assertEqual(1, len(ext_subnets))
|
||||
ext_subnet = self._show('subnets', ext_subnets[0])['subnet']
|
||||
self.assertEqual(leg_subnet['cidr'], ext_subnet['cidr'])
|
||||
self.assertEqual(leg_subnet['gateway_ip'], ext_subnet['gateway_ip'])
|
||||
self.assertEqual(leg_subnet['enable_dhcp'], ext_subnet['enable_dhcp'])
|
||||
self.assertEqual('SNAT host pool', ext_subnet['name'])
|
||||
self.assertTrue(ext_subnet['apic:snat_host_pool'])
|
||||
self.assertEqual(ext_net['project_id'], ext_subnet['project_id'])
|
||||
|
||||
|
||||
class TestGbpMapping(AimValidationTestCase):
|
||||
|
||||
|
@@ -10,8 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.

- from neutron.db import api as db_api

+ from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_allowed_vm_name_db as db)
from gbpservice.neutron.tests.unit.services.grouppolicy import (

@@ -10,8 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.

- from neutron.db import api as db_api

+ from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_segmentation_label_db as db)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
|
@ -12,7 +12,6 @@
|
||||
# limitations under the License.
|
||||
|
||||
import mock
|
||||
from neutron_lib import constants
|
||||
from neutron_lib import context as nctx
|
||||
from neutron_lib.plugins import constants as pconst
|
||||
from neutron_lib.plugins import directory
|
||||
@ -60,7 +59,7 @@ class CommonNeutronBaseTestCase(test_plugin.GroupPolicyPluginTestBase):
|
||||
self._plugin.is_agent_down = mock.Mock(return_value=False)
|
||||
self._context = nctx.get_admin_context()
|
||||
self._gbp_plugin = directory.get_plugin(pconst.GROUP_POLICY)
|
||||
self._l3_plugin = directory.get_plugin(constants.L3)
|
||||
self._l3_plugin = directory.get_plugin(pconst.L3)
|
||||
config.cfg.CONF.set_override('debug', True)
|
||||
|
||||
def get_plugin_context(self):
|
||||
|
@ -108,7 +108,7 @@ class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase):
|
||||
self._plugin.is_agent_down = mock.Mock(return_value=False)
|
||||
self._context = nctx.get_admin_context()
|
||||
self._gbp_plugin = directory.get_plugin(pconst.GROUP_POLICY)
|
||||
self._l3_plugin = directory.get_plugin(cst.L3)
|
||||
self._l3_plugin = directory.get_plugin(pconst.L3)
|
||||
self.saved_keystone_client = resource_mapping.k_client.Client
|
||||
resource_mapping.k_client.Client = mock.Mock()
|
||||
pdm.PolicyDriverManager.get_policy_target_group_status = (
|
||||
@ -1966,7 +1966,7 @@ class TestL3Policy(ResourceMappingTestCase,
|
||||
self._extend_subnet_prefixes(prefixes_list, l3p, version)
|
||||
self.assertItemsEqual(subnetpool_prefixes, prefixes_list)
|
||||
|
||||
params = {'ids': ascp_ids}
|
||||
params = '&'.join('id=' + id for id in ascp_ids)
|
||||
req = self.new_list_request('address-scopes',
|
||||
params=params, fmt=self.fmt)
|
||||
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
|
||||
@ -2019,7 +2019,7 @@ class TestL3Policy(ResourceMappingTestCase,
|
||||
self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
|
||||
else:
|
||||
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
|
||||
params = {'ids': ascp_ids}
|
||||
params = '&'.join('id=' + id for id in ascp_ids)
|
||||
req = self.new_list_request('address-scopes',
|
||||
params=params, fmt=self.fmt)
|
||||
res = req.get_response(self.ext_api)
|
||||
|
@ -22,7 +22,6 @@ from networking_sfc.services.flowclassifier.common import config as flc_cfg
|
||||
from networking_sfc.services.flowclassifier import driver_manager as fc_driverm
|
||||
from networking_sfc.services.sfc.common import config as sfc_cfg
|
||||
from networking_sfc.services.sfc import driver_manager as sfc_driverm
|
||||
from neutron.db import api as db_api
|
||||
from neutron.db.models import l3 as l3_db
|
||||
from neutron_lib.callbacks import exceptions as c_exc
|
||||
from neutron_lib import context
|
||||
@ -30,6 +29,7 @@ from neutron_lib.plugins import directory
|
||||
from opflexagent import constants as ofcst
|
||||
from oslo_log import log as logging
|
||||
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.neutron.services.grouppolicy import config
|
||||
from gbpservice.neutron.tests.unit.db.grouppolicy import test_group_policy_db
|
||||
from gbpservice.neutron.tests.unit.services.grouppolicy import (
|
||||
@ -1104,7 +1104,7 @@ class TestPortChain(TestAIMServiceFunctionChainingBase):
|
||||
self._aim_context, aim_res.EndpointGroup(
|
||||
tenant_name='new', app_profile_name='new', name='new'))
|
||||
try:
|
||||
with db_api.context_manager.writer.using(self._ctx):
|
||||
with db_api.CONTEXT_WRITER.using(self._ctx):
|
||||
net_db = self._plugin._get_network(
|
||||
self._ctx, fc['l7_parameters']['logical_source_network'])
|
||||
self.assertRaises(c_exc.CallbackFailure,
|
||||
@ -1117,7 +1117,7 @@ class TestPortChain(TestAIMServiceFunctionChainingBase):
|
||||
except Rollback:
|
||||
pass
|
||||
try:
|
||||
with db_api.context_manager.writer.using(self._ctx):
|
||||
with db_api.CONTEXT_WRITER.using(self._ctx):
|
||||
net_db = self._plugin._get_network(
|
||||
self._ctx,
|
||||
fc['l7_parameters']['logical_destination_network'])
|
||||
@ -1132,7 +1132,7 @@ class TestPortChain(TestAIMServiceFunctionChainingBase):
|
||||
pass
|
||||
# Also changing EPG affects PC if tenant changes
|
||||
try:
|
||||
with db_api.context_manager.writer.using(self._ctx):
|
||||
with db_api.CONTEXT_WRITER.using(self._ctx):
|
||||
net_db = self._plugin._get_network(
|
||||
self._ctx,
|
||||
fc['l7_parameters']['logical_destination_network'])
|
||||
@ -1163,7 +1163,7 @@ class TestPortChain(TestAIMServiceFunctionChainingBase):
|
||||
|
||||
pp = self.show_port_pair(ppg['port_pairs'][0])['port_pair']
|
||||
net = self._get_port_network(pp['ingress'])
|
||||
with db_api.context_manager.writer.using(self._ctx):
|
||||
with db_api.CONTEXT_WRITER.using(self._ctx):
|
||||
# Modifying EPG in service nets has no effect
|
||||
net_db = self._plugin._get_network(self._ctx, net['id'])
|
||||
self.aim_mech._set_network_epg_and_notify(
|
||||
@ -1172,7 +1172,7 @@ class TestPortChain(TestAIMServiceFunctionChainingBase):
|
||||
app_profile_name='new',
|
||||
name='new'))
|
||||
|
||||
with db_api.context_manager.writer.using(self._ctx):
|
||||
with db_api.CONTEXT_WRITER.using(self._ctx):
|
||||
# But it fails when VRF is changed
|
||||
net_db = self._plugin._get_network(self._ctx, net['id'])
|
||||
self.assertRaises(c_exc.CallbackFailure,
|
||||
|
@ -15,12 +15,12 @@ import copy
|
||||
import time
|
||||
|
||||
from heatclient import exc as heat_exc
|
||||
from neutron.db import api as db_api
|
||||
from neutron_lib.plugins import constants as pconst
|
||||
from oslo_config import cfg
|
||||
from oslo_serialization import jsonutils
|
||||
import yaml
|
||||
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.neutron.services.grouppolicy.common import constants as gconst
|
||||
from gbpservice.neutron.services.servicechain.plugins.ncp import plumber_base
|
||||
from gbpservice.nfp.common import constants as nfp_constants
|
||||
|
@ -14,6 +14,7 @@
|
||||
import oslo_messaging as messaging
|
||||
|
||||
from gbpservice._i18n import _
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.nfp.common import constants as nfp_constants
|
||||
from gbpservice.nfp.common import topics as nsf_topics
|
||||
from gbpservice.nfp.common import utils as nfp_utils
|
||||
@ -27,7 +28,6 @@ from gbpservice.nfp.orchestrator.db import nfp_db as nfp_db
|
||||
from gbpservice.nfp.orchestrator.drivers import orchestration_driver
|
||||
from gbpservice.nfp.orchestrator.openstack import openstack_driver
|
||||
from neutron.common import rpc as n_rpc
|
||||
from neutron.db import api as db_api
|
||||
from neutron_lib import context as n_context
|
||||
|
||||
import copy
|
||||
|
@ -11,12 +11,12 @@
|
||||
# under the License.
|
||||
|
||||
from neutron.common import rpc as n_rpc
|
||||
from neutron.db import api as db_api
|
||||
from neutron_lib import context as n_context
|
||||
from oslo_log import helpers as log_helpers
|
||||
import oslo_messaging
|
||||
|
||||
from gbpservice._i18n import _
|
||||
from gbpservice.neutron.db import api as db_api
|
||||
from gbpservice.nfp.common import constants as nfp_constants
|
||||
from gbpservice.nfp.common import exceptions as nfp_exc
|
||||
from gbpservice.nfp.common import topics as nfp_rpc_topics
|
||||
|
setup.py
@@ -1,4 +1,3 @@
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,5 +25,5 @@ except ImportError:
pass

setuptools.setup(
- setup_requires=['pbr>=1.8'],
+ setup_requires=['pbr>=2.0.0'],
pbr=True)