use context manager from neutron-lib

Change-Id: Iee1dc60862b12d48104325d2798fb881c5edcc9d
This commit is contained in:
Pulkit vajpayee 2023-09-15 04:03:36 +00:00
parent 1dc38b63a5
commit d0d6094bee
16 changed files with 1372 additions and 1061 deletions

View File

@ -445,7 +445,13 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
def _get_l3_policy(self, context, l3_policy_id):
try:
return db_api.get_by_id(context, L3Policy, l3_policy_id)
l3p_db = db_api.get_by_id(context, L3Policy, l3_policy_id)
# REVISIT: l3p_db is a DB-bound object. It can't be used outside the
# session because in some places it raises an error saying that
# l3p_db should be bound to a session. Hence, reassigning.
for l3p in l3p_db:
l3p_db[l3p[0]] = l3p[1]
return l3p_db
except exc.NoResultFound:
raise gpolicy.L3PolicyNotFound(l3_policy_id=l3_policy_id)
@ -529,7 +535,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
if not action_id_list:
pr_db.policy_actions = []
return
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
# We will first check if the new list of actions is valid
filters = {'id': [a_id for a_id in action_id_list]}
actions_in_db = db_api.get_collection_query(context, PolicyAction,
@ -557,7 +563,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
def _validate_policy_rule_set_list(self, context,
policy_rule_sets_id_list):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(context):
filters = {'id': [c_id for c_id in policy_rule_sets_id_list]}
policy_rule_sets_in_db = db_api.get_collection_query(
context, PolicyRuleSet, filters=filters)
@ -599,7 +605,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
else:
db_res.consumed_policy_rule_sets = []
return
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
policy_rule_sets_id_list = list(policy_rule_sets_dict.keys())
# We will first check if the new list of policy_rule_sets is valid
self._validate_policy_rule_set_list(
@ -630,7 +636,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
# Only one hierarchy level allowed for now
raise gpolicy.ThreeLevelPolicyRuleSetHierarchyNotSupported(
policy_rule_set_id=policy_rule_set_db['id'])
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
# We will first check if the new list of policy_rule_sets is valid
policy_rule_sets_in_db = self._validate_policy_rule_set_list(
@ -657,7 +663,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
if not rule_id_list:
prs_db.policy_rules = []
return
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
# We will first check if the new list of rules is valid
filters = {'id': [r_id for r_id in rule_id_list]}
rules_in_db = db_api.get_collection_query(context, PolicyRule,
@ -702,24 +708,24 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
return res
def _set_l3_policy_for_l2_policy(self, context, l2p_id, l3p_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
l2p_db = self._get_l2_policy(context, l2p_id)
l2p_db.l3_policy_id = l3p_id
def _set_l2_policy_for_policy_target_group(self, context, ptg_id, l2p_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
ptg_db = self._get_policy_target_group(context, ptg_id)
ptg_db.l2_policy_id = l2p_id
def _set_application_policy_group_for_policy_target_group(
self, context, ptg_id, apg_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
ptg_db = self._get_policy_target_group(context, ptg_id)
ptg_db.application_policy_group_id = apg_id
def _set_network_service_policy_for_policy_target_group(
self, context, ptg_id, nsp_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
ptg_db = self._get_policy_target_group(context, ptg_id)
ptg_db.network_service_policy_id = nsp_id
@ -730,7 +736,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
if not params:
nsp_db.network_service_params = []
return
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
nsp_db.network_service_params = []
for param in params:
param_db = NetworkServiceParam(
@ -744,7 +750,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
if not es_id_list:
ep_db.external_segments = []
return
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
filters = {'id': es_id_list}
eps_in_db = db_api.get_collection_query(
context, ExternalSegment, filters=filters)
@ -776,7 +782,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
if not es_dict:
l3p_db.external_segments = []
return
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
# Validate ESs exist
es_set = set(es_dict.keys())
filters = {'id': es_set}
@ -934,7 +940,7 @@ class GroupPolicyDbPlugin(gpolicy.GroupPolicyPluginBase):
res['child_policy_rule_sets'] = [
child_prs['id'] for child_prs in prs['child_policy_rule_sets']]
else:
with ctx.session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(ctx):
filters = {'parent_id': [prs['id']]}
child_prs_in_db = db_api.get_collection_query(
ctx, PolicyRuleSet, filters=filters)

View File

@ -153,7 +153,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
def _get_subnetpools(self, id_list):
context = get_current_context().elevated()
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(context):
filters = {'id': id_list}
return db_api.get_collection_query(
context, models_v2.SubnetPool, filters=filters).all()
@ -196,43 +196,43 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
return db_api.resource_fields(res, fields)
def _set_port_for_policy_target(self, context, pt_id, port_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
pt_db = self._get_policy_target(context, pt_id)
pt_db.port_id = port_id
def _add_subnet_to_policy_target_group(self, context, ptg_id, subnet_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
ptg_db = self._get_policy_target_group(context, ptg_id)
assoc = PTGToSubnetAssociation(policy_target_group_id=ptg_id,
subnet_id=subnet_id)
ptg_db.subnets.append(assoc)
return [subnet.subnet_id for subnet in ptg_db.subnets]
return [subnet.subnet_id for subnet in ptg_db.subnets]
def _remove_subnets_from_policy_target_groups(self, context, subnet_ids):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
assocs = context.session.query(PTGToSubnetAssociation).filter(
PTGToSubnetAssociation.subnet_id.in_(subnet_ids)).all()
for assoc in assocs:
context.session.delete(assoc)
def _remove_subnets_from_policy_target_group(self, context, ptg_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
ptg_db = self._get_policy_target_group(context, ptg_id)
assocs = (context.session.query(PTGToSubnetAssociation).
filter_by(policy_target_group_id=ptg_id).all())
ptg_db.update({'subnets': []})
for assoc in assocs:
context.session.delete(assoc)
return []
return []
def _set_network_for_l2_policy(self, context, l2p_id, network_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
l2p_db = self._get_l2_policy(context, l2p_id)
l2p_db.network_id = network_id
def _set_address_scope_for_l3_policy_by_id(
self, context, l3p_id, address_scope_id, ip_version=4):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = self._get_l3_policy(context, l3p_id)
self._set_address_scope_for_l3_policy(
context, l3p_db, address_scope_id, ip_version)
@ -242,7 +242,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
if not address_scope_id:
return
# TODO(Sumit): address_scope_id validation
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
if ip_version == 4:
l3p_db.address_scope_v4_id = address_scope_id
else:
@ -250,7 +250,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
def _add_subnetpool_to_l3_policy_by_id(self, context, l3p_id,
subnetpool_id, ip_version=4):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = self._get_l3_policy(context, l3p_id)
return self._add_subnetpool_to_l3_policy(
context, l3p_db, subnetpool_id, ip_version)
@ -258,7 +258,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
def _add_subnetpool_to_l3_policy(self, context, l3p_db,
subnetpool_id, ip_version=4):
# TODO(Sumit): subnetpool_id validation
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
if ip_version == 4:
assoc = L3PolicySubnetpoolV4Association(
l3_policy_id=l3p_db['id'], subnetpool_id=subnetpool_id)
@ -267,12 +267,12 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
assoc = L3PolicySubnetpoolV6Association(
l3_policy_id=l3p_db['id'], subnetpool_id=subnetpool_id)
l3p_db.subnetpools_v6.append(assoc)
return {4: [sp.subnetpool_id for sp in l3p_db.subnetpools_v4],
6: [sp.subnetpool_id for sp in l3p_db.subnetpools_v6]}
return {4: [sp.subnetpool_id for sp in l3p_db.subnetpools_v4],
6: [sp.subnetpool_id for sp in l3p_db.subnetpools_v6]}
def _add_subnetpools_to_l3_policy_by_id(self, context, l3p_id,
subnetpools, ip_version=4):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = self._get_l3_policy(context, l3p_id)
self._add_subnetpools_to_l3_policy(
context, l3p_db, subnetpools, ip_version)
@ -285,7 +285,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
def _remove_subnetpool_from_l3_policy(self, context, l3p_id,
subnetpool_id, ip_version=4):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = self._get_l3_policy(context, l3p_id)
if ip_version == 4:
assoc = (context.session.query(
@ -300,8 +300,8 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
subnetpool_id=subnetpool_id).one())
l3p_db.subnetpools_v6.remove(assoc)
context.session.delete(assoc)
return {4: [sp.subnetpool_id for sp in l3p_db.subnetpools_v4],
6: [sp.subnetpool_id for sp in l3p_db.subnetpools_v6]}
return {4: [sp.subnetpool_id for sp in l3p_db.subnetpools_v4],
6: [sp.subnetpool_id for sp in l3p_db.subnetpools_v6]}
def _update_subnetpools_for_l3_policy(self, context, l3p_id,
subnetpools, ip_version=4):
@ -310,7 +310,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
# there is no PT present on a subnet which belongs to that subnetpool
if not subnetpools:
return
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = self._get_l3_policy(context, l3p_id)
new_subnetpools = set(subnetpools)
if ip_version == 4:
@ -342,35 +342,35 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
context.session.delete(assoc)
def _add_router_to_l3_policy(self, context, l3p_id, router_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = self._get_l3_policy(context, l3p_id)
assoc = L3PolicyRouterAssociation(l3_policy_id=l3p_id,
router_id=router_id)
l3p_db.routers.append(assoc)
return [router.router_id for router in l3p_db.routers]
return [router.router_id for router in l3p_db.routers]
def _remove_router_from_l3_policy(self, context, l3p_id, router_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = self._get_l3_policy(context, l3p_id)
assoc = (context.session.query(L3PolicyRouterAssociation).
filter_by(l3_policy_id=l3p_id, router_id=router_id).
one())
l3p_db.routers.remove(assoc)
context.session.delete(assoc)
return [router.router_id for router in l3p_db.routers]
return [router.router_id for router in l3p_db.routers]
def _set_subnet_to_es(self, context, es_id, subnet_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
es_db = self._get_external_segment(context, es_id)
es_db.subnet_id = subnet_id
def _set_subnet_to_np(self, context, np_id, subnet_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
np_db = self._get_nat_pool(context, np_id)
np_db.subnet_id = subnet_id
def _update_ess_for_l3p(self, context, l3p_id, ess):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
l3p_db = self._get_l3_policy(context, l3p_id)
self._set_ess_for_l3p(context, l3p_db, ess)
@ -391,7 +391,7 @@ class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
return mapping['l3_policy_id']
def _set_db_np_subnet(self, context, nat_pool, subnet_id):
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
nat_pool['subnet_id'] = subnet_id
db_np = self._get_nat_pool(context, nat_pool['id'])
db_np.subnet_id = nat_pool['subnet_id']

View File

@ -31,6 +31,7 @@ from aim.api import resource as aim_resource
from aim import context as aim_context
from aim import utils as aim_utils
from alembic import util as alembic_util
from gbpservice.neutron.db import api as db_api
from neutron.db.migration.cli import CONF
from neutron.db.models import address_scope as as_db
from neutron.db.models import securitygroup as sg_models
@ -132,7 +133,7 @@ def do_apic_aim_persist_migration(session):
aim_ctx = aim_context.AimContext(session)
mapper = apic_mapper.APICNameMapper()
with session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(session):
# Migrate address scopes.
scope_dbs = (session.query(as_db.AddressScope)
.options(lazyload('*')).all())
@ -269,7 +270,7 @@ def do_ap_name_change(session, conf=None):
aim_ctx = aim_context.AimContext(session)
system_id = cfg.apic_system_id
alembic_util.msg("APIC System ID: %s" % system_id)
with session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(session):
net_dbs = session.query(models_v2.Network).options(lazyload('*')).all()
for net_db in net_dbs:
ext_db = _get_network_extn_db(session, net_db.id)
@ -361,7 +362,7 @@ def do_apic_aim_security_group_migration(session):
aim = aim_manager.AimManager()
aim_ctx = aim_context.AimContext(session)
mapper = apic_mapper.APICNameMapper()
with session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(session):
# Migrate SG.
sg_dbs = (session.query(sg_models.SecurityGroup).
options(lazyload('*')).all())
@ -433,7 +434,7 @@ def do_sg_rule_remote_group_id_insertion(session):
aim = aim_manager.AimManager()
aim_ctx = aim_context.AimContext(session)
mapper = apic_mapper.APICNameMapper()
with session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(session):
sg_rule_dbs = (session.query(sg_models.SecurityGroupRule).
options(lazyload('*')).all())
for sg_rule_db in sg_rule_dbs:
@ -463,7 +464,7 @@ def do_ha_ip_duplicate_entries_removal(session):
haip_ip = HAIPAddressToPortAssociation.c.ha_ip_address
haip_port_id = HAIPAddressToPortAssociation.c.port_id
with session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(session):
port_and_haip_dbs = (session.query(models_v2.Port,
HAIPAddressToPortAssociation).join(
HAIPAddressToPortAssociation,
@ -495,7 +496,7 @@ def do_ha_ip_network_id_insertion(session):
haip_ip = HAIPAddressToPortAssociation.c.ha_ip_address
haip_port_id = HAIPAddressToPortAssociation.c.port_id
with session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(session):
haip_ip = HAIPAddressToPortAssociation.c.ha_ip_address
haip_port_id = HAIPAddressToPortAssociation.c.port_id
port_and_haip_dbs = (session.query(models_v2.Port,

View File

@ -329,20 +329,22 @@ class DbMixin(object):
# HAIPAddressToPortAssociation functions.
def _get_ha_ipaddress(self, ipaddress, network_id, session=None):
session = session or db_api.get_reader_session()
ctx = n_context.get_admin_context()
query = BAKERY(lambda s: s.query(
HAIPAddressToPortAssociation))
query += lambda q: q.filter_by(
ha_ip_address=sa.bindparam('ipaddress'),
network_id=sa.bindparam('network_id'))
return query(session).params(
ipaddress=ipaddress, network_id=network_id).first()
with db_api.CONTEXT_READER.using(ctx):
query = BAKERY(lambda s: s.query(
HAIPAddressToPortAssociation))
query += lambda q: q.filter_by(
ha_ip_address=sa.bindparam('ipaddress'),
network_id=sa.bindparam('network_id'))
return query(ctx.session).params(
ipaddress=ipaddress, network_id=network_id).first()
def get_port_for_ha_ipaddress(self, ipaddress, network_id,
session=None):
"""Returns the Neutron Port ID for the HA IP Addresss."""
session = session or db_api.get_reader_session()
ctx = n_context.get_admin_context()
query = BAKERY(lambda s: s.query(
HAIPAddressToPortAssociation))
query += lambda q: q.join(
@ -355,20 +357,30 @@ class DbMixin(object):
sa.bindparam('network_id'))
query += lambda q: q.filter(
models_v2.Port.network_id == sa.bindparam('network_id'))
port_ha_ip = query(session).params(
ipaddress=ipaddress, network_id=network_id).first()
if session:
port_ha_ip = query(session).params(
ipaddress=ipaddress, network_id=network_id).first()
else:
with db_api.CONTEXT_READER.using(ctx):
port_ha_ip = query(ctx.session).params(
ipaddress=ipaddress, network_id=network_id).first()
return port_ha_ip
def get_ha_ipaddresses_for_port(self, port_id, session=None):
"""Returns the HA IP Addressses associated with a Port."""
session = session or db_api.get_reader_session()
ctx = n_context.get_admin_context()
query = BAKERY(lambda s: s.query(
HAIPAddressToPortAssociation))
query += lambda q: q.filter_by(
port_id=sa.bindparam('port_id'))
objs = query(session).params(
port_id=port_id).all()
if session:
objs = query(session).params(
port_id=port_id).all()
else:
with db_api.CONTEXT_READER.using(ctx):
objs = query(ctx.session).params(
port_id=port_id).all()
# REVISIT: Do the sorting in the UT?
return sorted([x['ha_ip_address'] for x in objs])
@ -376,9 +388,9 @@ class DbMixin(object):
def update_port_id_for_ha_ipaddress(self, port_id, ipaddress, network_id,
session=None):
"""Stores a Neutron Port Id as owner of HA IP Addr (idempotent API)."""
session = session or db_api.get_writer_session()
ctx = session or n_context.get_admin_context()
try:
with session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(ctx) as session:
obj = self._get_ha_ipaddress(ipaddress, network_id, session)
if obj:
haip_ip = HAIPAddressToPortAssociation.ha_ip_address
@ -401,8 +413,8 @@ class DbMixin(object):
def delete_port_id_for_ha_ipaddress(self, port_id, ipaddress,
session=None):
session = session or db_api.get_writer_session()
with session.begin(subtransactions=True):
ctx = session or n_context.get_admin_context()
with db_api.CONTEXT_WRITER.using(ctx) as session:
try:
# REVISIT: Can this query be baked? The
# sqlalchemy.ext.baked.Result class does not have a
@ -418,11 +430,12 @@ class DbMixin(object):
# REVISIT: This method is only called from unit tests.
def get_ha_port_associations(self):
session = db_api.get_reader_session()
ctx = n_context.get_admin_context()
query = BAKERY(lambda s: s.query(
HAIPAddressToPortAssociation))
return query(session).all()
with db_api.CONTEXT_READER.using(ctx):
query = BAKERY(lambda s: s.query(
HAIPAddressToPortAssociation))
return query(ctx.session).all()
# REVISIT: Move this method to the mechanism_driver or rpc module,
# as it is above the DB level. This will also require some rework
@ -446,17 +459,17 @@ class DbMixin(object):
if not ipa:
continue
try:
session = db_api.get_writer_session()
with session.begin(subtransactions=True):
ctx = n_context.get_admin_context()
with db_api.CONTEXT_WRITER.using(ctx):
net_id = port['network_id']
old_owner = self.get_port_for_ha_ipaddress(
ipa, network_id or net_id,
session=session)
session=ctx.session)
old_owner_port_id = None
if old_owner:
old_owner_port_id = old_owner['port_id']
self.update_port_id_for_ha_ipaddress(
port_id, ipa, net_id, session)
port_id, ipa, net_id, ctx.session)
if old_owner_port_id and old_owner_port_id != port_id:
ports_to_update.add(old_owner_port_id)
except db_exc.DBReferenceError as dbe:

View File

@ -57,7 +57,7 @@ from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_constants
from neutron_lib import context as nctx
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exceptions
from neutron_lib.plugins import constants as pconst
from neutron_lib.plugins import directory
@ -187,18 +187,22 @@ class KeystoneNotificationEndpoint(object):
# we only update tenants which have been created in APIC. For other
# cases, their nameAlias will be set when the first resource is
# being created under that tenant
session = db_api.get_writer_session()
tenant_aname = self._driver.name_mapper.project(session, tenant_id)
aim_ctx = aim_context.AimContext(session)
tenant = aim_resource.Tenant(name=tenant_aname)
if not self._driver.aim.get(aim_ctx, tenant):
return None
ctx = n_context.get_admin_context()
# TODO(pulkit): replace with AIM writer context once API
# supports it.
with db_api.CONTEXT_WRITER.using(ctx):
tenant_aname = self._driver.name_mapper.project(ctx.session,
tenant_id)
aim_ctx = aim_context.AimContext(ctx.session)
tenant = aim_resource.Tenant(name=tenant_aname)
if not self._driver.aim.get(aim_ctx, tenant):
return None
disp_name = aim_utils.sanitize_display_name(prj_details[0])
descr = aim_utils.sanitize_description(prj_details[1])
self._driver.aim.update(
aim_ctx, tenant, display_name=disp_name, descr=descr)
return oslo_messaging.NotificationResult.HANDLED
disp_name = aim_utils.sanitize_display_name(prj_details[0])
descr = aim_utils.sanitize_description(prj_details[1])
self._driver.aim.update(
aim_ctx, tenant, display_name=disp_name, descr=descr)
return oslo_messaging.NotificationResult.HANDLED
if event_type == 'identity.project.deleted':
if not self._driver.enable_keystone_notification_purge:
@ -211,13 +215,17 @@ class KeystoneNotificationEndpoint(object):
self._driver.project_details_cache.purge_gbp(tenant_id)
# delete the tenant and AP in AIM also
session = db_api.get_writer_session()
tenant_aname = self._driver.name_mapper.project(session, tenant_id)
aim_ctx = aim_context.AimContext(session)
tenant = aim_resource.Tenant(name=tenant_aname)
self._driver.aim.delete(aim_ctx, tenant, cascade=True)
ctx = n_context.get_admin_context()
# TODO(pulkit): replace with AIM writer context once API
# supports it.
with db_api.CONTEXT_WRITER.using(ctx):
tenant_aname = self._driver.name_mapper.project(ctx.session,
tenant_id)
aim_ctx = aim_context.AimContext(ctx.session)
tenant = aim_resource.Tenant(name=tenant_aname)
self._driver.aim.delete(aim_ctx, tenant, cascade=True)
return oslo_messaging.NotificationResult.HANDLED
return oslo_messaging.NotificationResult.HANDLED
@registry.has_registry_receivers
@ -308,7 +316,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
def _update_nova_vm_name_cache(self):
current_time = datetime.now()
context = nctx.get_admin_context()
context = n_context.get_admin_context()
with db_api.CONTEXT_READER.using(context) as session:
vm_name_update = self._get_vm_name_update(session)
is_full_update = True
@ -456,22 +464,24 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
@db_api.retry_db_errors
def _ensure_static_resources(self):
session = db_api.get_writer_session()
aim_ctx = aim_context.AimContext(session)
self._ensure_common_tenant(aim_ctx)
self._ensure_unrouted_vrf(aim_ctx)
self._ensure_any_filter(aim_ctx)
self._setup_default_arp_dhcp_security_group_rules(aim_ctx)
ctx = n_context.get_admin_context()
# TODO(pulkit): replace with AIM writer context once API supports it.
with db_api.CONTEXT_WRITER.using(ctx):
aim_ctx = aim_context.AimContext(ctx.session)
self._ensure_common_tenant(aim_ctx)
self._ensure_unrouted_vrf(aim_ctx)
self._ensure_any_filter(aim_ctx)
self._setup_default_arp_dhcp_security_group_rules(aim_ctx)
# This is required for infra resources needed by ERSPAN
check_topology = self.aim.find(aim_ctx, aim_resource.Topology)
if not check_topology:
topology_aim = aim_resource.Topology()
self.aim.create(aim_ctx, topology_aim)
check_infra = self.aim.find(aim_ctx, aim_resource.Infra)
if not check_infra:
infra_aim = aim_resource.Infra()
self.aim.create(aim_ctx, infra_aim)
# This is required for infra resources needed by ERSPAN
check_topology = self.aim.find(aim_ctx, aim_resource.Topology)
if not check_topology:
topology_aim = aim_resource.Topology()
self.aim.create(aim_ctx, topology_aim)
check_infra = self.aim.find(aim_ctx, aim_resource.Infra)
if not check_infra:
infra_aim = aim_resource.Infra()
self.aim.create(aim_ctx, infra_aim)
def _setup_default_arp_dhcp_security_group_rules(self, aim_ctx):
sg_name = self._default_sg_name
@ -2967,7 +2977,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
def update_summary_resource(self, session, resources, network_id):
for resource in resources:
if type(resource) == aim_resource.SpanVepgSummary:
with session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(session):
query = BAKERY(lambda s: s.query(
models_v2.Network))
query += lambda q: q.filter_by(
@ -3115,7 +3125,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
raise exceptions.InvalidPortForErspanSession()
# Not supported on SVI networks
ctx = nctx.get_admin_context()
ctx = n_context.get_admin_context()
net_db = self.plugin._get_network(ctx, port['network_id'])
if self._is_svi_db(net_db):
raise exceptions.InvalidNetworkForErspanSession()
@ -3267,7 +3277,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
new_subnets = list(set(curr) - set(orig))
if not new_subnets:
return
ctx = nctx.get_admin_context()
ctx = n_context.get_admin_context()
net_db = self.plugin._get_network(ctx, original_port['network_id'])
if self._is_svi_db(net_db):
self._update_svi_ports_for_added_subnet(ctx, new_subnets,
@ -3507,13 +3517,14 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
context._plugin_context, router_db, context.current, subnets)
def _delete_router_port(self, session, port_id):
query = BAKERY(lambda s: s.query(
l3_db.RouterPort))
query += lambda q: q.filter_by(
port_id=sa.bindparam('port_id'))
db_obj = query(session).params(
port_id=port_id).one()
session.delete(db_obj)
with db_api.CONTEXT_WRITER.using(session):
query = BAKERY(lambda s: s.query(
l3_db.RouterPort))
query += lambda q: q.filter_by(
port_id=sa.bindparam('port_id'))
db_obj = query(session).params(
port_id=port_id).one()
session.delete(db_obj)
def create_security_group_precommit(self, context):
session = context._plugin_context.session
@ -4052,7 +4063,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
if not self._dvs_notifier:
self._dvs_notifier = importutils.import_object(
DVS_AGENT_KLASS,
nctx.get_admin_context_without_session()
n_context.get_admin_context_without_session()
)
return self._dvs_notifier
@ -5043,16 +5054,18 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
ports_to_notify = [port_id]
fixed_ips = [x['ip_address'] for x in port['fixed_ips']]
if fixed_ips:
query = BAKERY(lambda s: s.query(
n_addr_pair_db.AllowedAddressPair))
query += lambda q: q.join(
models_v2.Port,
models_v2.Port.id == n_addr_pair_db.AllowedAddressPair.port_id)
query += lambda q: q.filter(
models_v2.Port.network_id == sa.bindparam('network_id'))
addr_pair = query(plugin_context.session).params(
network_id=port['network_id']).all()
notify_pairs = []
with db_api.CONTEXT_READER.using(plugin_context):
query = BAKERY(lambda s: s.query(
n_addr_pair_db.AllowedAddressPair))
query += lambda q: q.join(
models_v2.Port,
models_v2.Port.id == (
n_addr_pair_db.AllowedAddressPair.port_id))
query += lambda q: q.filter(
models_v2.Port.network_id == sa.bindparam('network_id'))
addr_pair = query(plugin_context.session).params(
network_id=port['network_id']).all()
notify_pairs = []
# In order to support use of CIDRs in allowed-address-pairs,
# we can't include the fixed IPs in the DB query, and instead
# have to qualify that with post-DB processing

View File

@ -187,7 +187,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
# current context, which should be available in
# the session.info's dictionary, with a key of
# 'using_context').
with session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_network_dict(
session, netdb, result)
else:
@ -202,7 +202,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(netdb)
if session and session.is_active:
with session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_network_dict_bulk(session,
results)
else:
@ -221,7 +221,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
# current context, which should be available in
# the session.info's dictionary, with a key of
# 'using_context').
with session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_port_dict(
session, portdb, result)
else:
@ -236,7 +236,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(portdb)
if session and session.is_active:
with session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_port_dict_bulk(session,
results)
else:
@ -270,7 +270,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(subnetdb)
if session and session.is_active:
with session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_subnet_dict_bulk(session,
results)
else:
@ -304,7 +304,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(subnetpooldb)
if session and session.is_active:
with session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_subnetpool_dict_bulk(session,
results)
else:
@ -324,7 +324,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
# current context, which should be available in
# the session.info's dictionary, with a key of
# 'using_context').
with session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_address_scope_dict(
session, address_scope, result)
else:
@ -339,7 +339,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
plugin = directory.get_plugin()
session = db_api.get_session_from_obj(address_scope)
if session and session.is_active:
with session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(session):
plugin.extension_manager.extend_address_scope_dict_bulk(
session, results)
else:

View File

@ -175,6 +175,7 @@ class ApicL3Plugin(extraroute_db.ExtraRoute_db_mixin,
# Overwrite the upstream implementation to take advantage
# of the bulk extension support.
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_READER
def get_routers(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):

View File

@ -373,8 +373,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
def create_l2_policy_postcommit(self, context):
if not context.current['l3_policy_id']:
self._use_implicit_l3_policy(context)
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['l3_policy_id'])
with db_api.CONTEXT_READER.using(context._plugin_context):
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['l3_policy_id'])
if not context.current['network_id']:
self._use_implicit_network(
context, address_scope_v4=l3p_db['address_scope_v4_id'],
@ -433,14 +434,15 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
def delete_l2_policy_postcommit(self, context):
auto_ptg_id = self._get_auto_ptg_id(context.current['id'])
try:
auto_ptg = context._plugin._get_policy_target_group(
context._plugin_context, auto_ptg_id)
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['l3_policy_id'])
subnet_ids = [assoc['subnet_id'] for assoc in auto_ptg.subnets]
router_ids = [assoc.router_id for assoc in l3p_db.routers]
context._plugin._remove_subnets_from_policy_target_group(
context._plugin_context, auto_ptg_id)
with db_api.CONTEXT_WRITER.using(context._plugin_context):
auto_ptg = context._plugin._get_policy_target_group(
context._plugin_context, auto_ptg_id)
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['l3_policy_id'])
subnet_ids = [assoc['subnet_id'] for assoc in auto_ptg.subnets]
router_ids = [assoc.router_id for assoc in l3p_db.routers]
context._plugin._remove_subnets_from_policy_target_group(
context._plugin_context, auto_ptg_id)
self._process_subnets_for_ptg_delete(
context, subnet_ids, router_ids)
# REVISIT: Consider calling the actual GBP plugin instead
@ -632,8 +634,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
l2p_id = ptg['l2_policy_id']
if l2p_id:
l2p_db = context._plugin._get_l2_policy(
context._plugin_context, l2p_id)
with db_api.CONTEXT_READER.using(context):
l2p_db = context._plugin._get_l2_policy(
context._plugin_context, l2p_id)
if not l2p_db['policy_target_groups'] or (
(len(l2p_db['policy_target_groups']) == 1) and (
self._is_auto_ptg(l2p_db['policy_target_groups'][0]))):
@ -1748,8 +1751,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
"_process_contracts_for_default_epg(), create and "
"delete cannot be True at the same time")
raise
session = context._plugin_context.session
aim_ctx = aim_context.AimContext(session)
with db_api.CONTEXT_WRITER.using(context._plugin_context):
session = context._plugin_context.session
aim_ctx = aim_context.AimContext(session)
# Infra Services' FilterEntries and attributes
infra_entries = alib.get_service_contract_filter_entries()

View File

@ -19,6 +19,7 @@ import sqlalchemy as sa
from gbpservice._i18n import _
from gbpservice.network.neutronv2 import local_api
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.extensions import driver_proxy_group as pg_ext
from gbpservice.neutron.extensions import group_policy as gbp_ext
from gbpservice.neutron.services.grouppolicy import (
@ -175,7 +176,7 @@ class ImplicitPolicyBase(api.PolicyDriver, local_api.LocalAPI):
session.add(owned)
def _l2_policy_is_owned(self, session, l2p_id):
    """Return True if the L2 policy was implicitly created (owned) by GBP.

    :param session: DB session used for the ownership lookup
    :param l2p_id: ID of the L2 policy to check
    """
    # Diff residue removed: the stale session.begin(subtransactions=True)
    # line is dropped; only the neutron-lib reader transaction remains.
    # NOTE(review): other call sites in this commit pass a request context
    # to CONTEXT_READER.using(); here a session is passed -- confirm the
    # gbpservice db_api wrapper accepts a session argument.
    with db_api.CONTEXT_READER.using(session):
        return (session.query(OwnedL2Policy).
                filter_by(l2_policy_id=l2p_id).
                first() is not None)
@ -186,20 +187,26 @@ class ImplicitPolicyBase(api.PolicyDriver, local_api.LocalAPI):
session.add(owned)
def _l3_policy_is_owned(self, session, l3p_id):
    """Return True if the L3 policy was implicitly created (owned) by GBP.

    :param session: DB session used for the ownership lookup
    :param l3p_id: ID of the L3 policy to check
    """
    # Diff residue removed: keep only the neutron-lib reader transaction,
    # replacing the legacy session.begin(subtransactions=True) pattern.
    with db_api.CONTEXT_READER.using(session):
        return (session.query(OwnedL3Policy).
                filter_by(l3_policy_id=l3p_id).
                first() is not None)
def _cleanup_l3_policy(self, context, l3p_id):
    """Delete the L3 policy if this driver implicitly created it.

    The ownership check runs inside a reader transaction; the delete is
    issued afterwards so delete_l3_policy can open its own transaction.
    """
    with db_api.CONTEXT_READER.using(context._plugin_context):
        res = self._l3_policy_is_owned(context._plugin_context.session,
                                       l3p_id)
    if res:
        # REVISIT(rkukura): Add check_unused parameter to
        # local_api._delete_l3_policy()?
        context._plugin.delete_l3_policy(context._plugin_context, l3p_id,
                                         check_unused=True)
def _cleanup_l2_policy(self, context, l2p_id):
if self._l2_policy_is_owned(context._plugin_context.session, l2p_id):
with db_api.CONTEXT_READER.using(context._plugin_context):
res = self._l2_policy_is_owned(context._plugin_context.session,
l2p_id)
if res:
try:
self._delete_l2_policy(context._plugin_context, l2p_id)
except gbp_ext.L2PolicyInUse:

View File

@ -13,6 +13,7 @@
from neutron_lib.plugins import directory
from oslo_log import helpers as log
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
from gbpservice.neutron.services.grouppolicy.drivers import (
implicit_policy as ipd)
@ -45,8 +46,9 @@ class CommonNeutronBase(ipd.ImplicitPolicyBase, rmd.OwnedResourcesOperations,
def create_l2_policy_postcommit(self, context):
if not context.current['l3_policy_id']:
self._use_implicit_l3_policy(context)
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['l3_policy_id'])
with db_api.CONTEXT_READER.using(context._plugin_context):
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['l3_policy_id'])
if not context.current['network_id']:
self._use_implicit_network(
context, address_scope_v4=l3p_db['address_scope_v4_id'],

View File

@ -166,7 +166,7 @@ class OwnedResourcesOperations(object):
session.add(owned)
def _port_is_owned(self, session, port_id):
    """Return True if the port was implicitly created (owned) by GBP.

    :param session: DB session used for the ownership lookup
    :param port_id: ID of the port to check
    """
    # Diff residue removed: keep only the neutron-lib reader transaction.
    with db_api.CONTEXT_READER.using(session):
        return (session.query(OwnedPort).
                filter_by(port_id=port_id).
                first() is not None)
@ -177,7 +177,7 @@ class OwnedResourcesOperations(object):
session.add(owned)
def _subnet_is_owned(self, session, subnet_id):
    """Return True if the subnet was implicitly created (owned) by GBP.

    :param session: DB session used for the ownership lookup
    :param subnet_id: ID of the subnet to check
    """
    # Diff residue removed: keep only the neutron-lib reader transaction.
    with db_api.CONTEXT_READER.using(session):
        return (session.query(OwnedSubnet).
                filter_by(subnet_id=subnet_id).
                first() is not None)
@ -188,7 +188,7 @@ class OwnedResourcesOperations(object):
session.add(owned)
def _network_is_owned(self, session, network_id):
    """Return True if the network was implicitly created (owned) by GBP.

    :param session: DB session used for the ownership lookup
    :param network_id: ID of the network to check
    """
    # Diff residue removed: keep only the neutron-lib reader transaction.
    with db_api.CONTEXT_READER.using(session):
        return (session.query(OwnedNetwork).
                filter_by(network_id=network_id).
                first() is not None)
@ -199,7 +199,7 @@ class OwnedResourcesOperations(object):
session.add(owned)
def _router_is_owned(self, session, router_id):
    """Return True if the router was implicitly created (owned) by GBP.

    :param session: DB session used for the ownership lookup
    :param router_id: ID of the router to check
    """
    # Diff residue removed: keep only the neutron-lib reader transaction.
    with db_api.CONTEXT_READER.using(session):
        return (session.query(OwnedRouter).
                filter_by(router_id=router_id).
                first() is not None)
@ -210,7 +210,7 @@ class OwnedResourcesOperations(object):
session.add(owned)
def _address_scope_is_owned(self, session, address_scope_id):
    """Return True if the address scope was implicitly created by GBP.

    :param session: DB session used for the ownership lookup
    :param address_scope_id: ID of the address scope to check
    """
    # Diff residue removed: keep only the neutron-lib reader transaction.
    with db_api.CONTEXT_READER.using(session):
        return (session.query(OwnedAddressScope).
                filter_by(address_scope_id=address_scope_id).
                first() is not None)
@ -221,7 +221,7 @@ class OwnedResourcesOperations(object):
session.add(owned)
def _subnetpool_is_owned(self, session, subnetpool_id):
    """Return True if the subnetpool was implicitly created by GBP.

    :param session: DB session used for the ownership lookup
    :param subnetpool_id: ID of the subnetpool to check
    """
    # Diff residue removed: keep only the neutron-lib reader transaction.
    with db_api.CONTEXT_READER.using(session):
        return (session.query(OwnedSubnetpool).
                filter_by(subnetpool_id=subnetpool_id).
                first() is not None)
@ -310,8 +310,10 @@ class ImplicitResourceOperations(local_api.LocalAPI,
return address_scope
def _cleanup_address_scope(self, plugin_context, address_scope_id):
if self._address_scope_is_owned(plugin_context.session,
address_scope_id):
with db_api.CONTEXT_READER.using(plugin_context):
res = self._address_scope_is_owned(plugin_context.session,
address_scope_id)
if res:
subpools = self._get_subnetpools(plugin_context,
filters={'address_scope_id':
[address_scope_id]})
@ -350,8 +352,10 @@ class ImplicitResourceOperations(local_api.LocalAPI,
ip_version=ip_version)
def _cleanup_subnetpool(self, plugin_context, subnetpool_id):
if self._subnetpool_is_owned(plugin_context.session,
subnetpool_id):
with db_api.CONTEXT_READER.using(plugin_context):
res = self._subnetpool_is_owned(plugin_context.session,
subnetpool_id)
if res:
subnets = self._get_subnets(plugin_context,
filters={'subnetpool_id':
[subnetpool_id]})
@ -382,7 +386,9 @@ class ImplicitResourceOperations(local_api.LocalAPI,
context.set_network_id(network['id'])
def _cleanup_network(self, plugin_context, network_id):
    """Delete the network if this driver implicitly created (owns) it."""
    # Consistency fix: sibling cleanups (_cleanup_port, _cleanup_router)
    # pass the plugin context to CONTEXT_READER.using() and read the
    # session from it; the residue here passed plugin_context.session
    # directly, which diverges from every other call site in this file.
    with db_api.CONTEXT_READER.using(plugin_context):
        res = self._network_is_owned(plugin_context.session, network_id)
    if res:
        # Delete outside the reader block so it can open its own
        # (writer) transaction.
        self._delete_network(plugin_context, network_id)
def _generate_subnets_from_cidrs(self, context, l2p, l3p, cidrs,
@ -433,14 +439,15 @@ class ImplicitResourceOperations(local_api.LocalAPI,
context, subnet_id)
def _get_l3p_allocated_subnets(self, context, l3p_id):
    """Return the CIDRs already allocated to PTGs of the given L3 policy.

    Uses an elevated context for the PTG lookup inside a reader
    transaction, then resolves the CIDRs outside of it.
    """
    with db_api.CONTEXT_READER.using(context._plugin_context):
        ptgs = context._plugin._get_l3p_ptgs(
            context._plugin_context.elevated(), l3p_id)
    return self._get_ptg_cidrs(context, None, ptg_dicts=ptgs)
def _validate_and_add_subnet(self, context, subnet, l3p_id):
subnet_id = subnet['id']
session = context._plugin_context.session
with session.begin(subtransactions=True):
with db_api.CONTEXT_READER.using(session):
LOG.debug("starting validate_and_add_subnet transaction for "
"subnet %s", subnet_id)
ptgs = context._plugin._get_l3p_ptgs(
@ -619,8 +626,10 @@ class ImplicitResourceOperations(local_api.LocalAPI,
attrs.update(subnet_specifics)
subnet = self._create_subnet(context._plugin_context,
attrs)
self._mark_subnet_owned(context._plugin_context.session,
subnet['id'])
with db_api.CONTEXT_WRITER.using(context._plugin_context):
self._mark_subnet_owned(
context._plugin_context.session,
subnet['id'])
LOG.debug("Allocated subnet %(sub)s from subnetpool: "
"%(sp)s.", {'sub': subnet['id'],
'sp': pool['id']})
@ -656,7 +665,10 @@ class ImplicitResourceOperations(local_api.LocalAPI,
except ext_l3.RouterInterfaceNotFoundForSubnet:
LOG.debug("Ignoring RouterInterfaceNotFoundForSubnet cleaning "
"up subnet: %s", subnet_id)
if self._subnet_is_owned(plugin_context.session, subnet_id):
with db_api.CONTEXT_READER.using(plugin_context):
res = self._subnet_is_owned(plugin_context.session, subnet_id)
if res:
self._delete_subnet(plugin_context, subnet_id)
def _get_default_security_group(self, plugin_context, ptg_id,
@ -729,7 +741,9 @@ class ImplicitResourceOperations(local_api.LocalAPI,
raise last
def _cleanup_port(self, plugin_context, port_id):
if self._port_is_owned(plugin_context.session, port_id):
with db_api.CONTEXT_READER.using(plugin_context):
res = self._port_is_owned(plugin_context.session, port_id)
if res:
try:
self._delete_port(plugin_context, port_id)
except n_exc.PortNotFound:
@ -770,7 +784,9 @@ class ImplicitResourceOperations(local_api.LocalAPI,
return router_id
def _cleanup_router(self, plugin_context, router_id):
    """Delete the router if this driver implicitly created (owns) it."""
    # Ownership check in a reader transaction; delete afterwards so the
    # delete path can open its own (writer) transaction.
    with db_api.CONTEXT_READER.using(plugin_context):
        res = self._router_is_owned(plugin_context.session, router_id)
    if res:
        self._delete_router(plugin_context, router_id)
def _plug_router_to_subnet(self, plugin_context, subnet_id, router_id):
@ -919,8 +935,11 @@ class ImplicitResourceOperations(local_api.LocalAPI,
if (context.original['external_segment_id'] !=
context.current['external_segment_id']):
if context.original['subnet_id']:
if self._subnet_is_owned(context._plugin_context.session,
context.original['subnet_id']):
with db_api.CONTEXT_READER.using(context._plugin_context):
res = self._subnet_is_owned(
context._plugin_context.session,
context.original['subnet_id'])
if res:
self._delete_subnet(context._plugin_context,
context.original['subnet_id'])
if (context.current['external_segment_id'] and not
@ -980,8 +999,10 @@ class ImplicitResourceOperations(local_api.LocalAPI,
def _delete_subnet_on_nat_pool_delete(self, context):
    """On NAT pool delete, remove its subnet if GBP implicitly owns it."""
    if context.current['subnet_id']:
        # Ownership check in a reader transaction; the delete runs
        # outside so it can open its own (writer) transaction.
        with db_api.CONTEXT_READER.using(context._plugin_context):
            res = self._subnet_is_owned(context._plugin_context.session,
                                        context.current['subnet_id'])
        if res:
            self._delete_subnet(context._plugin_context,
                                context.current['subnet_id'])
@ -2685,7 +2706,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
@staticmethod
def _get_policy_rule_set_sg_mapping(session, policy_rule_set_id):
    """Return the SG mapping row for the given policy rule set.

    :raises sqlalchemy.orm.exc.NoResultFound: if no mapping exists
        (``.one()`` requires exactly one row).
    """
    # Diff residue removed: keep only the neutron-lib reader transaction.
    with db_api.CONTEXT_READER.using(session):
        return (session.query(PolicyRuleSetSGsMapping).
                filter_by(policy_rule_set_id=policy_rule_set_id).one())

View File

@ -22,6 +22,8 @@ from neutron.tests import base
from neutron_lib import context
from sqlalchemy.orm import exc
from neutron_lib import context as n_context
from gbpservice.neutron.db import api as db_api
from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import exceptions as nfp_exc
@ -75,7 +77,7 @@ class NFPDBTestCase(SqlTestCase):
super(NFPDBTestCase, self).setUp()
self.ctx = context.get_admin_context()
self.nfp_db = NFPDB()
self.session = db_api.get_writer_session()
self.session = n_context.get_admin_context().session
def create_network_function(self, attributes=None):
if attributes is None:
@ -90,7 +92,9 @@ class NFPDBTestCase(SqlTestCase):
'config_policy_id': 'config_policy_id',
'status': 'status'
}
return self.nfp_db.create_network_function(self.session, attributes)
with db_api.CONTEXT_WRITER.using(self.session):
return self.nfp_db.create_network_function(self.session,
attributes)
def test_create_network_function(self):
attrs = {
@ -140,16 +144,19 @@ class NFPDBTestCase(SqlTestCase):
'status': 'status'
}
network_function = self.create_network_function(attrs_all)
db_network_function = self.nfp_db.get_network_function(
self.session, network_function['id'])
for key in attrs_all:
self.assertEqual(attrs_all[key], db_network_function[key])
with db_api.CONTEXT_WRITER.using(self.session):
db_network_function = self.nfp_db.get_network_function(
self.session, network_function['id'])
for key in attrs_all:
self.assertEqual(attrs_all[key], db_network_function[key])
def test_list_network_function(self):
    """Listing returns exactly the one network function created."""
    network_function = self.create_network_function()
    # NOTE(review): a WRITER context is used for a pure read here, and
    # self.session (a session, not a request context) is passed to
    # using() -- confirm the gbpservice db_api wrapper supports both.
    with db_api.CONTEXT_WRITER.using(self.session):
        network_functions = self.nfp_db.get_network_functions(self.session)
        self.assertEqual(1, len(network_functions))
        self.assertEqual(network_function['id'],
                         network_functions[0]['id'])
def test_list_network_function_with_filters(self):
attrs = {
@ -161,14 +168,16 @@ class NFPDBTestCase(SqlTestCase):
}
network_function = self.create_network_function(attrs)
filters = {'service_id': ['service_id']}
network_functions = self.nfp_db.get_network_functions(
self.session, filters=filters)
self.assertEqual(1, len(network_functions))
self.assertEqual(network_function['id'], network_functions[0]['id'])
filters = {'service_id': ['nonexisting']}
network_functions = self.nfp_db.get_network_functions(
self.session, filters=filters)
self.assertEqual([], network_functions)
with db_api.CONTEXT_READER.using(self.session):
network_functions = self.nfp_db.get_network_functions(
self.session, filters=filters)
self.assertEqual(1, len(network_functions))
self.assertEqual(network_function['id'],
network_functions[0]['id'])
filters = {'service_id': ['nonexisting']}
network_functions = self.nfp_db.get_network_functions(
self.session, filters=filters)
self.assertEqual([], network_functions)
def test_update_network_function(self):
self.nfp_db.update_node_instance_network_function_map = mock.MagicMock(
@ -176,18 +185,20 @@ class NFPDBTestCase(SqlTestCase):
network_function = self.create_network_function()
self.assertIsNotNone(network_function['id'])
updated_network_function = {'status': 'ERROR'}
network_function = self.nfp_db.update_network_function(
self.session, network_function['id'], updated_network_function)
self.assertEqual('ERROR', network_function['status'])
with db_api.CONTEXT_WRITER.using(self.session):
network_function = self.nfp_db.update_network_function(
self.session, network_function['id'], updated_network_function)
self.assertEqual('ERROR', network_function['status'])
def test_delete_network_function(self):
    """Deleting a network function makes subsequent gets raise NotFound."""
    network_function = self.create_network_function()
    self.assertIsNotNone(network_function['id'])
    with db_api.CONTEXT_WRITER.using(self.session):
        self.nfp_db.delete_network_function(
            self.session, network_function['id'])
        self.assertRaises(nfp_exc.NetworkFunctionNotFound,
                          self.nfp_db.get_network_function,
                          self.session, network_function['id'])
def create_network_function_instance(self, attributes=None,
create_nfd=True):
@ -213,8 +224,9 @@ class NFPDBTestCase(SqlTestCase):
],
'status': 'status'
}
return self.nfp_db.create_network_function_instance(
self.session, attributes)
with db_api.CONTEXT_WRITER.using(self.session):
return self.nfp_db.create_network_function_instance(
self.session, attributes)
def test_create_network_function_instance(self):
network_function = self.create_network_function()
@ -238,11 +250,13 @@ class NFPDBTestCase(SqlTestCase):
],
'status': 'status'
}
network_function_instance = (
self.nfp_db.create_network_function_instance(self.session, attrs))
for key in attrs:
self.assertEqual(attrs[key], network_function_instance[key])
self.assertIsNotNone(network_function_instance['id'])
with db_api.CONTEXT_WRITER.using(self.session):
network_function_instance = (
self.nfp_db.create_network_function_instance(self.session,
attrs))
for key in attrs:
self.assertEqual(attrs[key], network_function_instance[key])
self.assertIsNotNone(network_function_instance['id'])
def test_create_network_function_instance_mandatory_values(self):
network_function = self.create_network_function()
@ -253,17 +267,18 @@ class NFPDBTestCase(SqlTestCase):
'status': 'status',
'port_info': []
}
network_function_instance = (
self.nfp_db.create_network_function_instance(
self.session, attrs_mandatory))
for key in attrs_mandatory:
self.assertEqual(attrs_mandatory[key],
network_function_instance[key])
self.assertIsNotNone(network_function_instance['id'])
non_mandatory_args = ['network_function_device_id', 'ha_state']
for arg in non_mandatory_args:
self.assertIsNone(network_function_instance[arg])
self.assertEqual([], network_function_instance['port_info'])
with db_api.CONTEXT_WRITER.using(self.session):
network_function_instance = (
self.nfp_db.create_network_function_instance(
self.session, attrs_mandatory))
for key in attrs_mandatory:
self.assertEqual(attrs_mandatory[key],
network_function_instance[key])
self.assertIsNotNone(network_function_instance['id'])
non_mandatory_args = ['network_function_device_id', 'ha_state']
for arg in non_mandatory_args:
self.assertIsNone(network_function_instance[arg])
self.assertEqual([], network_function_instance['port_info'])
def test_get_network_function_instance(self):
network_function = self.create_network_function()
@ -287,54 +302,60 @@ class NFPDBTestCase(SqlTestCase):
],
'status': 'status'
}
network_function_instance = (
self.nfp_db.create_network_function_instance(
self.session, attrs_all))
db_network_function_instance = (
self.nfp_db.get_network_function_instance(
self.session, network_function_instance['id']))
for key in attrs_all:
self.assertEqual(attrs_all[key], db_network_function_instance[key])
with db_api.CONTEXT_WRITER.using(self.session):
network_function_instance = (
self.nfp_db.create_network_function_instance(
self.session, attrs_all))
db_network_function_instance = (
self.nfp_db.get_network_function_instance(
self.session, network_function_instance['id']))
for key in attrs_all:
self.assertEqual(attrs_all[key],
db_network_function_instance[key])
def test_list_network_function_instance(self):
    """Listing returns the single instance created by the create test."""
    # The create helper test performs writes; it is wrapped in the reader
    # context together with the list call, mirroring the commit's intent
    # of running all DB access inside a neutron-lib transaction.
    with db_api.CONTEXT_READER.using(self.session):
        self.test_create_network_function_instance()
        nf_instances = self.nfp_db.get_network_function_instances(
            self.session)
        self.assertEqual(1, len(nf_instances))
def test_list_network_function_instances_with_filters(self):
    """Filtering matches on ha_state and returns [] for no matches."""
    with db_api.CONTEXT_READER.using(self.session):
        self.test_create_network_function_instance()
        filters = {'ha_state': ['Active']}
        nf_instances = self.nfp_db.get_network_function_instances(
            self.session, filters=filters)
        self.assertEqual(1, len(nf_instances))
        filters = {'ha_state': ['nonexisting']}
        nf_instances = self.nfp_db.get_network_function_instances(
            self.session, filters=filters)
        self.assertEqual([], nf_instances)
def test_update_network_function_instance(self):
    """Updating an instance's status is reflected in the returned row."""
    with db_api.CONTEXT_WRITER.using(self.session):
        network_function_instance = self.create_network_function_instance()
        self.assertIsNotNone(network_function_instance['id'])
        updated_nfi = {'status': 'ERROR'}
        nf_instance = self.nfp_db.update_network_function_instance(
            self.session, network_function_instance['id'], updated_nfi)
        self.assertEqual('ERROR', nf_instance['status'])
def test_delete_network_function_instance(self):
    """Deleting an instance also removes its associated port info."""
    network_function_instance = self.create_network_function_instance()
    port_info = network_function_instance['port_info']
    self.assertIsNotNone(network_function_instance['id'])
    with db_api.CONTEXT_WRITER.using(self.session):
        self.nfp_db.delete_network_function_instance(
            self.session, network_function_instance['id'])
        self.assertRaises(nfp_exc.NetworkFunctionInstanceNotFound,
                          self.nfp_db.get_network_function_instance,
                          self.session, network_function_instance['id'])
        # Each port referenced by the instance must be gone as well.
        for port_id in port_info:
            self.assertRaises(nfp_exc.NFPPortNotFound,
                              self.nfp_db.get_port_info,
                              self.session,
                              port_id)
def create_network_function_device(self, attributes=None):
if attributes is None:
@ -364,8 +385,9 @@ class NFPDBTestCase(SqlTestCase):
'port_role': nfp_constants.ACTIVE_PORT},
'status': 'status'
}
return self.nfp_db.create_network_function_device(
self.session, attributes)
with db_api.CONTEXT_WRITER.using(self.session):
return self.nfp_db.create_network_function_device(
self.session, attributes)
def test_create_network_function_device(self):
attrs = {
@ -394,16 +416,18 @@ class NFPDBTestCase(SqlTestCase):
'port_role': nfp_constants.ACTIVE_PORT},
'status': 'status'
}
network_function_device = self.nfp_db.create_network_function_device(
self.session, attrs)
self.assertIn('gateway_port', network_function_device)
for key in attrs:
if (key == 'mgmt_port_id') or (key == 'monitoring_port_id'):
self.assertEqual(attrs[key]['id'],
network_function_device[key])
continue
self.assertEqual(attrs[key], network_function_device[key])
self.assertIsNotNone(network_function_device['id'])
with db_api.CONTEXT_WRITER.using(self.session):
network_function_device = (
self.nfp_db.create_network_function_device(
self.session, attrs))
self.assertIn('gateway_port', network_function_device)
for key in attrs:
if (key == 'mgmt_port_id') or (key == 'monitoring_port_id'):
self.assertEqual(attrs[key]['id'],
network_function_device[key])
continue
self.assertEqual(attrs[key], network_function_device[key])
self.assertIsNotNone(network_function_device['id'])
def test_create_network_function_device_mandatory_values(self):
attrs_mandatory = {
@ -416,16 +440,17 @@ class NFPDBTestCase(SqlTestCase):
'interfaces_in_use': 1,
'status': 'status'
}
nf_device = self.nfp_db.create_network_function_device(
self.session, attrs_mandatory)
for key in attrs_mandatory:
self.assertEqual(attrs_mandatory[key], nf_device[key])
self.assertIsNotNone(nf_device['id'])
non_mandatory_args = ['monitoring_port_id',
'monitoring_port_network']
for arg in non_mandatory_args:
self.assertIsNone(nf_device[arg])
self.assertIsNone(nf_device['mgmt_port_id'])
with db_api.CONTEXT_WRITER.using(self.session):
nf_device = self.nfp_db.create_network_function_device(
self.session, attrs_mandatory)
for key in attrs_mandatory:
self.assertEqual(attrs_mandatory[key], nf_device[key])
self.assertIsNotNone(nf_device['id'])
non_mandatory_args = ['monitoring_port_id',
'monitoring_port_network']
for arg in non_mandatory_args:
self.assertIsNone(nf_device[arg])
self.assertIsNone(nf_device['mgmt_port_id'])
def test_get_network_function_device(self):
attrs = {
@ -454,33 +479,41 @@ class NFPDBTestCase(SqlTestCase):
'port_role': nfp_constants.ACTIVE_PORT},
'status': 'status'
}
network_function_device = self.nfp_db.create_network_function_device(
self.session, attrs)
db_network_function_device = self.nfp_db.get_network_function_device(
self.session, network_function_device['id'])
for key in attrs:
if (key == 'mgmt_port_id') or (key == 'monitoring_port_id'):
self.assertEqual(attrs[key]['id'],
network_function_device[key])
continue
self.assertEqual(attrs[key], db_network_function_device[key])
with db_api.CONTEXT_WRITER.using(self.session):
network_function_device = (
self.nfp_db.create_network_function_device(
self.session, attrs))
db_network_function_device = (
self.nfp_db.get_network_function_device(
self.session, network_function_device['id']))
for key in attrs:
if (key == 'mgmt_port_id') or (key == 'monitoring_port_id'):
self.assertEqual(attrs[key]['id'],
network_function_device[key])
continue
self.assertEqual(attrs[key], db_network_function_device[key])
def test_list_network_function_device(self):
    """Listing returns the single device created by the create test."""
    with db_api.CONTEXT_READER.using(self.session):
        self.test_create_network_function_device()
        network_function_devices = (
            self.nfp_db.get_network_function_devices(
                self.session))
        self.assertEqual(1, len(network_function_devices))
def test_list_network_function_devices_with_filters(self):
    """Filtering matches on service_vendor and returns [] otherwise."""
    with db_api.CONTEXT_READER.using(self.session):
        self.test_create_network_function_device()
        filters = {'service_vendor': ['service_vendor']}
        network_function_devices = (
            self.nfp_db.get_network_function_devices(
                self.session, filters=filters))
        self.assertEqual(1, len(network_function_devices))
        filters = {'service_vendor': ['nonexisting']}
        network_function_devices = (
            self.nfp_db.get_network_function_devices(
                self.session, filters=filters))
        self.assertEqual([], network_function_devices)
def test_update_network_function_device(self):
attrs = {
@ -509,67 +542,70 @@ class NFPDBTestCase(SqlTestCase):
'port_role': nfp_constants.ACTIVE_PORT},
'status': 'status'
}
network_function_device = self.nfp_db.create_network_function_device(
self.session, attrs)
for key in attrs:
if (key == 'mgmt_port_id') or (key == 'monitoring_port_id'):
self.assertEqual(attrs[key]['id'],
network_function_device[key])
continue
with db_api.CONTEXT_WRITER.using(self.session):
network_function_device = (
self.nfp_db.create_network_function_device(
self.session, attrs))
for key in attrs:
if (key == 'mgmt_port_id') or (key == 'monitoring_port_id'):
self.assertEqual(attrs[key]['id'],
network_function_device[key])
continue
self.assertEqual(attrs[key], network_function_device[key])
self.assertIsNotNone(network_function_device['id'])
self.assertEqual(attrs[key], network_function_device[key])
self.assertIsNotNone(network_function_device['id'])
# update name
updated_network_function_device = {
'name': 'new_name'
}
updated_nfd = self.nfp_db.update_network_function_device(
self.session,
network_function_device['id'],
updated_network_function_device)
self.assertEqual('new_name', updated_nfd['name'])
del updated_nfd['name']
for key in attrs:
if (key == 'mgmt_port_id') or (key == 'monitoring_port_id'):
self.assertEqual(attrs[key]['id'],
network_function_device[key])
continue
if key != 'name':
self.assertEqual(attrs[key], updated_nfd[key])
# update name
updated_network_function_device = {
'name': 'new_name'
}
updated_nfd = self.nfp_db.update_network_function_device(
self.session,
network_function_device['id'],
updated_network_function_device)
self.assertEqual('new_name', updated_nfd['name'])
del updated_nfd['name']
for key in attrs:
if (key == 'mgmt_port_id') or (key == 'monitoring_port_id'):
self.assertEqual(attrs[key]['id'],
network_function_device[key])
continue
if key != 'name':
self.assertEqual(attrs[key], updated_nfd[key])
# Update mgmt port
updated_network_function_device = {
'mgmt_port_id': {
'id': 'myid3',
'port_model': nfp_constants.NEUTRON_PORT,
'port_classification': nfp_constants.MANAGEMENT,
'port_role': nfp_constants.ACTIVE_PORT},
'name': 'name'
}
updated_nfd = self.nfp_db.update_network_function_device(
self.session,
network_function_device['id'],
copy.deepcopy(updated_network_function_device))
self.assertEqual(updated_nfd['mgmt_port_id'], 'myid3')
del updated_nfd['mgmt_port_id']
for key in attrs:
if (key != 'mgmt_port_id') and (key != 'monitoring_port_id'):
self.assertEqual(attrs[key], updated_nfd[key])
# Update mgmt port
updated_network_function_device = {
'mgmt_port_id': {
'id': 'myid3',
'port_model': nfp_constants.NEUTRON_PORT,
'port_classification': nfp_constants.MANAGEMENT,
'port_role': nfp_constants.ACTIVE_PORT},
'name': 'name'
}
updated_nfd = self.nfp_db.update_network_function_device(
self.session,
network_function_device['id'],
copy.deepcopy(updated_network_function_device))
self.assertEqual(updated_nfd['mgmt_port_id'], 'myid3')
del updated_nfd['mgmt_port_id']
for key in attrs:
if (key != 'mgmt_port_id') and (key != 'monitoring_port_id'):
self.assertEqual(attrs[key], updated_nfd[key])
def test_delete_network_function_device(self):
    """Deleting a device also removes its management port info."""
    network_function_device = self.create_network_function_device()
    mgmt_port_id = network_function_device['mgmt_port_id']
    self.assertIsNotNone(network_function_device['id'])
    with db_api.CONTEXT_WRITER.using(self.session):
        self.nfp_db.delete_network_function_device(
            self.session, network_function_device['id'])
        self.assertRaises(nfp_exc.NetworkFunctionDeviceNotFound,
                          self.nfp_db.get_network_function_device,
                          self.session, network_function_device['id'])
        self.assertRaises(nfp_exc.NFPPortNotFound,
                          self.nfp_db.get_port_info,
                          self.session,
                          mgmt_port_id)
def _get_gateway_details(self):
return dict(
@ -582,46 +618,51 @@ class NFPDBTestCase(SqlTestCase):
)
def test_add_service_gateway_details(self):
gateway_details = self._get_gateway_details()
gateway = self.nfp_db.add_service_gateway_details(
self.session, gateway_details)
self.assertIsNotNone(gateway['id'])
self.nfp_db.delete_network_function(
self.session, gateway_details['network_function_id'])
gateway_details.update(
network_function_id=self.create_network_function()['id'],
gateway_vips=dict(primary_gw_vip_pt=str(uuid.uuid4()),
secondary_gw_vip_pt=str(uuid.uuid4())))
gateway = self.nfp_db.add_service_gateway_details(
self.session, gateway_details)
self.assertIsNotNone(gateway['id'])
with db_api.CONTEXT_WRITER.using(self.session):
gateway_details = self._get_gateway_details()
gateway = self.nfp_db.add_service_gateway_details(
self.session, gateway_details)
self.assertIsNotNone(gateway['id'])
self.nfp_db.delete_network_function(
self.session, gateway_details['network_function_id'])
gateway_details.update(
network_function_id=self.create_network_function()['id'],
gateway_vips=dict(primary_gw_vip_pt=str(uuid.uuid4()),
secondary_gw_vip_pt=str(uuid.uuid4())))
gateway = self.nfp_db.add_service_gateway_details(
self.session, gateway_details)
self.assertIsNotNone(gateway['id'])
def test_get_gateway_detail(self):
gateway_details = self._get_gateway_details()
gateway = self.nfp_db.add_service_gateway_details(
self.session, gateway_details)
self.assertIsNotNone(gateway['id'])
_gateway = self.nfp_db.get_gateway_detail(
self.session, gateway_details['network_function_id'])
self.assertEqual((gateway['id'], gateway['network_function_id']),
(_gateway['id'], _gateway['network_function_id']))
with db_api.CONTEXT_WRITER.using(self.session):
gateway_details = self._get_gateway_details()
gateway = self.nfp_db.add_service_gateway_details(
self.session, gateway_details)
self.assertIsNotNone(gateway['id'])
_gateway = self.nfp_db.get_gateway_detail(
self.session, gateway_details['network_function_id'])
self.assertEqual((gateway['id'], gateway['network_function_id']),
(_gateway['id'], _gateway['network_function_id']))
def test_get_providers_for_gateway(self):
gateway_details = self._get_gateway_details()
gateway = self.nfp_db.add_service_gateway_details(
self.session, gateway_details)
self.assertIsNotNone(gateway['id'])
_gateway = self.nfp_db.get_providers_for_gateway(
self.session, gateway_details['gw_ptg'])[0]
self.assertEqual((gateway['id'], gateway['network_function_id']),
(_gateway['id'], _gateway['network_function_id']))
with db_api.CONTEXT_WRITER.using(self.session):
gateway_details = self._get_gateway_details()
gateway = self.nfp_db.add_service_gateway_details(
self.session, gateway_details)
self.assertIsNotNone(gateway['id'])
_gateway = self.nfp_db.get_providers_for_gateway(
self.session, gateway_details['gw_ptg'])[0]
self.assertEqual((gateway['id'], gateway['network_function_id']),
(_gateway['id'], _gateway['network_function_id']))
def test_delete_gateway(self):
gateway_details = self._get_gateway_details()
gateway = self.nfp_db.add_service_gateway_details(
self.session, gateway_details)
self.assertIsNotNone(gateway['id'])
self.nfp_db.delete_network_function(self.session, gateway_details[
'network_function_id'])
self.assertRaises(exc.NoResultFound, self.nfp_db.get_gateway_detail,
self.session, gateway_details['network_function_id'])
with db_api.CONTEXT_WRITER.using(self.session):
gateway_details = self._get_gateway_details()
gateway = self.nfp_db.add_service_gateway_details(
self.session, gateway_details)
self.assertIsNotNone(gateway['id'])
self.nfp_db.delete_network_function(self.session, gateway_details[
'network_function_id'])
self.assertRaises(exc.NoResultFound,
self.nfp_db.get_gateway_detail,
self.session, gateway_details['network_function_id'])

File diff suppressed because it is too large Load Diff

View File

@ -164,8 +164,10 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
ml2_options=ml2_opts, l3_plugin=l3_plugin,
sc_plugin=sc_plugin, qos_plugin=qos_plugin,
trunk_plugin=trunk_plugin)
self.db_session = db_api.get_writer_session()
self.initialize_db_config(self.db_session)
ctx = nctx.get_admin_context()
with db_api.CONTEXT_WRITER.using(ctx):
self.db_session = ctx.session
self.initialize_db_config(self.db_session)
self.l3_plugin = directory.get_plugin(pconst.L3)
config.cfg.CONF.set_override('network_vlan_ranges',
['physnet1:1000:1099'],
@ -840,7 +842,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
def _get_nsp_ptg_fip_mapping(self, ptg_id):
ctx = nctx.get_admin_context()
with ctx.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(ctx):
return (ctx.session.query(
nsp_manager.ServicePolicyPTGFipMapping).
filter_by(policy_target_group_id=ptg_id).
@ -1610,15 +1612,15 @@ class TestLegacyL3Policy(TestL3Policy):
aimd.AIMMappingDriver._create_per_l3p_implicit_contracts)
def _create_per_l3p_implicit_contracts(self):
session = nctx.get_admin_context().session
with session.begin(subtransactions=True):
ctx = nctx.get_admin_context()
with db_api.CONTEXT_WRITER.using(ctx):
l3p_db = group_policy_mapping_db.L3PolicyMapping(
id='1234', tenant_id='some_tenant',
name='test-l3p', description='test-desc',
ip_version=4, ip_pool='10.0.0.0/8',
subnet_prefix_length=24, shared=False)
session.add(l3p_db)
session.flush()
ctx.session.add(l3p_db)
ctx.session.flush()
orig_create_per_l3p_implicit_contracts(self)
aimd.AIMMappingDriver._create_per_l3p_implicit_contracts = (
orig_create_per_l3p_implicit_contracts)
@ -2389,47 +2391,50 @@ class TestPolicyTargetGroupIpv4(AIMBaseTestCase):
num_address_families=len(list(self.ip_dict.keys())))
ptg_name = ptg['name']
aim_epg_name = self.driver.apic_epg_name_for_policy_target_group(
self._neutron_context.session, ptg_id, ptg_name,
context=self._neutron_context)
aim_tenant_name = self.name_mapper.project(None, self._tenant_id)
aim_app_profile_name = self.mech_driver.ap_name
aim_app_profiles = self.aim_mgr.find(
self._aim_context, aim_resource.ApplicationProfile,
tenant_name=aim_tenant_name, name=aim_app_profile_name)
self.assertEqual(1, len(aim_app_profiles))
req = self.new_show_request('networks', l2p['network_id'],
fmt=self.fmt)
net = self.deserialize(self.fmt,
req.get_response(self.api))['network']
bd = self.aim_mgr.get(
self._aim_context, aim_resource.BridgeDomain.from_dn(
net[DN][BD]))
aim_epgs = self.aim_mgr.find(
self._aim_context, aim_resource.EndpointGroup, name=aim_epg_name)
self.assertEqual(1, len(aim_epgs))
self.assertEqual(aim_epg_name, aim_epgs[0].name)
self.assertEqual(aim_tenant_name, aim_epgs[0].tenant_name)
self.assertEqual(bd.name, aim_epgs[0].bd_name)
with self.db_session.begin():
aim_epg_name = self.driver.apic_epg_name_for_policy_target_group(
self._neutron_context.session, ptg_id, ptg_name,
context=self._neutron_context)
aim_tenant_name = self.name_mapper.project(None, self._tenant_id)
aim_app_profile_name = self.mech_driver.ap_name
aim_app_profiles = self.aim_mgr.find(
self._aim_context, aim_resource.ApplicationProfile,
tenant_name=aim_tenant_name, name=aim_app_profile_name)
self.assertEqual(1, len(aim_app_profiles))
req = self.new_show_request('networks', l2p['network_id'],
fmt=self.fmt)
net = self.deserialize(self.fmt,
req.get_response(self.api))['network']
bd = self.aim_mgr.get(
self._aim_context, aim_resource.BridgeDomain.from_dn(
net[DN][BD]))
aim_epgs = self.aim_mgr.find(
self._aim_context, aim_resource.EndpointGroup,
name=aim_epg_name)
self.assertEqual(1, len(aim_epgs))
self.assertEqual(aim_epg_name, aim_epgs[0].name)
self.assertEqual(aim_tenant_name, aim_epgs[0].tenant_name)
self.assertEqual(bd.name, aim_epgs[0].bd_name)
self.assertEqual(aim_epgs[0].dn,
ptg[DN][EPG])
self._test_aim_resource_status(aim_epgs[0], ptg)
self.assertEqual(aim_epgs[0].dn, ptg_show[DN][EPG])
self.assertEqual(aim_epgs[0].dn,
ptg[DN][EPG])
self._test_aim_resource_status(aim_epgs[0], ptg)
self.assertEqual(aim_epgs[0].dn, ptg_show[DN][EPG])
new_name = 'new name'
new_prs_lists = self._get_provided_consumed_prs_lists()
self.update_policy_target_group(
ptg_id, expected_res_status=200, name=new_name,
provided_policy_rule_sets={new_prs_lists['provided']['id']:
'scope'},
consumed_policy_rule_sets={new_prs_lists['consumed']['id']:
'scope'})['policy_target_group']
aim_epg_name = self.driver.apic_epg_name_for_policy_target_group(
self._neutron_context.session, ptg_id, new_name,
context=self._neutron_context)
aim_epgs = self.aim_mgr.find(
self._aim_context, aim_resource.EndpointGroup, name=aim_epg_name)
new_name = 'new name'
new_prs_lists = self._get_provided_consumed_prs_lists()
self.update_policy_target_group(
ptg_id, expected_res_status=200, name=new_name,
provided_policy_rule_sets={new_prs_lists['provided']['id']:
'scope'},
consumed_policy_rule_sets={new_prs_lists['consumed']['id']:
'scope'})['policy_target_group']
aim_epg_name = self.driver.apic_epg_name_for_policy_target_group(
self._neutron_context.session, ptg_id, new_name,
context=self._neutron_context)
aim_epgs = self.aim_mgr.find(
self._aim_context, aim_resource.EndpointGroup,
name=aim_epg_name)
self.assertEqual(1, len(aim_epgs))
self.assertEqual(aim_epg_name, aim_epgs[0].name)
self._validate_contracts(ptg, aim_epgs[0], new_prs_lists, l2p)
@ -2445,8 +2450,11 @@ class TestPolicyTargetGroupIpv4(AIMBaseTestCase):
# Explicitly created L2P should not be deleted
self.show_l2_policy(ptg['l2_policy_id'], expected_res_status=200)
aim_epgs = self.aim_mgr.find(
self._aim_context, aim_resource.EndpointGroup, name=aim_epg_name)
# TODO(pulkit): replace with AIM reader context once API supports it.
with self.db_session.begin():
aim_epgs = self.aim_mgr.find(
self._aim_context, aim_resource.EndpointGroup,
name=aim_epg_name)
self.assertEqual(0, len(aim_epgs))
def _create_explicit_subnetpools(self):
@ -2949,9 +2957,11 @@ class TestGbpDetailsForML2(AIMBaseTestCase,
def test_get_gbp_details_pre_existing_vrf(self):
aim_ctx = aim_context.AimContext(self.db_session)
vrf = self.aim_mgr.create(
aim_ctx, aim_resource.VRF(tenant_name='common', name='ctx1',
monitored=True))
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
vrf = self.aim_mgr.create(
aim_ctx, aim_resource.VRF(tenant_name='common', name='ctx1',
monitored=True))
self._do_test_get_gbp_details(pre_vrf=vrf)
@ -3624,9 +3634,11 @@ class TestPolicyTarget(AIMBaseTestCase,
def test_get_gbp_details_pre_existing_vrf(self):
aim_ctx = aim_context.AimContext(self.db_session)
vrf = self.aim_mgr.create(
aim_ctx, aim_resource.VRF(tenant_name='common', name='ctx1',
monitored=True))
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
vrf = self.aim_mgr.create(
aim_ctx, aim_resource.VRF(tenant_name='common', name='ctx1',
monitored=True))
self._do_test_get_gbp_details(pre_vrf=vrf)
def test_get_gbp_details_no_pt(self):
@ -3636,9 +3648,11 @@ class TestPolicyTarget(AIMBaseTestCase,
def test_get_gbp_details_no_pt_pre_existing_vrf(self):
aim_ctx = aim_context.AimContext(self.db_session)
vrf = self.aim_mgr.create(
aim_ctx, aim_resource.VRF(tenant_name='common', name='ctx1',
monitored=True))
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
vrf = self.aim_mgr.create(
aim_ctx, aim_resource.VRF(tenant_name='common', name='ctx1',
monitored=True))
self._do_test_gbp_details_no_pt(pre_vrf=vrf)
def test_get_gbp_details_no_pt_no_as(self):
@ -5524,29 +5538,30 @@ class TestNeutronPortOperation(AIMBaseTestCase):
p1 = self._make_port(self.fmt, net['network']['id'],
device_owner='compute:')['port']
p1 = self._bind_port_to_host(p1['id'], 'host1')['port']
details = self.mech_driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='host1')
self.assertFalse(details['promiscuous_mode'])
with self.db_session.begin():
details = self.mech_driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p1['id'],
host='host1')
self.assertFalse(details['promiscuous_mode'])
p2 = self._make_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',),
device_owner='compute:',
port_security_enabled=True)['port']
p2 = self._bind_port_to_host(p2['id'], 'host1')['port']
details = self.mech_driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p2['id'],
host='host1')
self.assertFalse(details['promiscuous_mode'])
p2 = self._make_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',),
device_owner='compute:',
port_security_enabled=True)['port']
p2 = self._bind_port_to_host(p2['id'], 'host1')['port']
details = self.mech_driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p2['id'],
host='host1')
self.assertFalse(details['promiscuous_mode'])
p3 = self._make_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',),
device_owner='compute:',
port_security_enabled=False)['port']
p3 = self._bind_port_to_host(p3['id'], 'host1')['port']
details = self.mech_driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p3['id'],
host='host1')
p3 = self._make_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',),
device_owner='compute:',
port_security_enabled=False)['port']
p3 = self._bind_port_to_host(p3['id'], 'host1')['port']
details = self.mech_driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % p3['id'],
host='host1')
self.assertTrue(details['promiscuous_mode'])
# Test RPC without a PortSecurityBinding record, which should

View File

@ -101,39 +101,58 @@ class AimValidationTestMixin(object):
resource = copy.copy(resource)
# Make sure the AIM resource exists.
actual_resource = self.aim_mgr.get(self.aim_ctx, resource)
# TODO(pulkit): replace with AIM reader context once API supports it.
with self.db_session.begin():
actual_resource = self.aim_mgr.get(self.aim_ctx, resource)
self.assertIsNotNone(actual_resource)
# Only test deleting and modifying if not monitored.
if not actual_resource.monitored:
# Delete the AIM resource and test.
self.aim_mgr.delete(self.aim_ctx, resource)
# TODO(pulkit): replace with AIM writer context once API
# supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, resource)
self._validate_repair_validate()
self.assertTrue(
actual_resource.user_equal(
self.aim_mgr.get(self.aim_ctx, resource)))
# TODO(pulkit): replace with AIM writer context once API
# supports it.
with self.db_session.begin():
self.assertTrue(
actual_resource.user_equal(
self.aim_mgr.get(self.aim_ctx, resource)))
# Modify the AIM resource and test.
self.aim_mgr.update(
self.aim_ctx, resource, display_name='not what it was')
# Modify the AIM resource and test.
self.aim_mgr.update(
self.aim_ctx, resource, display_name='not what it was')
self._validate_repair_validate()
self.assertTrue(
actual_resource.user_equal(
self.aim_mgr.get(self.aim_ctx, resource)))
# TODO(pulkit): replace with AIM reader context once API
# supports it.
with self.db_session.begin():
self.assertTrue(
actual_resource.user_equal(
self.aim_mgr.get(self.aim_ctx, resource)))
# Add unexpected AIM resource and test.
setattr(resource, unexpected_attr_name, unexpected_attr_value)
self.aim_mgr.create(self.aim_ctx, resource)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.create(self.aim_ctx, resource)
self._validate_repair_validate()
if test_unexpected_monitored:
# Add unexpected monitored AIM resource and test.
resource.monitored = True
self.aim_mgr.create(self.aim_ctx, resource)
# TODO(pulkit): replace with AIM writer context once API
# supports it.
with self.db_session.begin():
self.aim_mgr.create(self.aim_ctx, resource)
self._validate()
# Delete unexpected monitored AIM resource.
self.aim_mgr.delete(self.aim_ctx, resource)
# TODO(pulkit): replace with AIM writer context once API
# supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, resource)
class AimValidationTestCase(test_aim_mapping_driver.AIMBaseTestCase,
@ -175,7 +194,9 @@ class TestNeutronMapping(AimValidationTestCase):
# Delete the common Tenant and test.
tenant = aim_resource.Tenant(name='common')
self.aim_mgr.delete(self.aim_ctx, tenant)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, tenant)
self._validate_repair_validate()
# Test unrouted AIM VRF.
@ -316,9 +337,10 @@ class TestNeutronMapping(AimValidationTestCase):
self._validate()
# Delete the address scope's mapping record and test.
(self.db_session.query(db.AddressScopeMapping).
filter_by(scope_id=scope4_id).
delete())
with self.db_session.begin():
(self.db_session.query(db.AddressScopeMapping).
filter_by(scope_id=scope4_id).
delete())
self._validate_repair_validate()
# Test AIM VRF.
@ -336,9 +358,10 @@ class TestNeutronMapping(AimValidationTestCase):
self._test_aim_resource(vrf)
# Delete the initial address scope's mapping record and test.
(self.db_session.query(db.AddressScopeMapping).
filter_by(scope_id=scope4_id).
delete())
with self.db_session.begin():
(self.db_session.query(db.AddressScopeMapping).
filter_by(scope_id=scope4_id).
delete())
self._validate_repair_validate()
scope4 = self._show('address-scopes', scope4_id)['address_scope']
self.assertEqual(vrf_dn, scope4['apic:distinguished_names']['VRF'])
@ -352,9 +375,10 @@ class TestNeutronMapping(AimValidationTestCase):
# test. Without this record, there is no way to know that the
# scopes were previously isomorphic, so they no longer will
# be isomorphic after repair.
(self.db_session.query(db.AddressScopeMapping).
filter_by(scope_id=scope6_id).
delete())
with self.db_session.begin():
(self.db_session.query(db.AddressScopeMapping).
filter_by(scope_id=scope6_id).
delete())
self._validate_repair_validate()
scope4 = self._show('address-scopes', scope4_id)['address_scope']
self.assertEqual(vrf_dn, scope4['apic:distinguished_names']['VRF'])
@ -385,9 +409,10 @@ class TestNeutronMapping(AimValidationTestCase):
net = self._show('networks', net['id'])['network']
# Delete the network's mapping record and test.
(self.db_session.query(db.NetworkMapping).
filter_by(network_id=net_id).
delete())
with self.db_session.begin():
(self.db_session.query(db.NetworkMapping).
filter_by(network_id=net_id).
delete())
self._validate_repair_validate()
self._test_network_attrs(net)
@ -431,7 +456,8 @@ class TestNeutronMapping(AimValidationTestCase):
# Add unexpect AIM Subnet if not external.
sn = self.driver.aim_mech_driver._map_subnet(
subnet, '10.0.2.1', bd)
self.aim_mgr.create(self.aim_ctx, sn)
with self.db_session.begin():
self.aim_mgr.create(self.aim_ctx, sn)
self._validate_repair_validate()
else:
# Test AIM Subnet if external.
@ -447,9 +473,10 @@ class TestNeutronMapping(AimValidationTestCase):
self._test_aim_resource(sn, 'gw_ip_mask', '10.0.3.1/24')
# Delete subnet extension data and test migration use case.
(self.db_session.query(ext_db.SubnetExtensionDb).
filter_by(subnet_id=subnet_id).
delete())
with self.db_session.begin():
(self.db_session.query(ext_db.SubnetExtensionDb).
filter_by(subnet_id=subnet_id).
delete())
self._validate_repair_validate()
return net
@ -462,7 +489,9 @@ class TestNeutronMapping(AimValidationTestCase):
bd = aim_resource.BridgeDomain(tenant_name=tenant_name,
name='some_bd_name')
bd.monitored = True
bd = self.aim_mgr.create(self.aim_ctx, bd)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
bd = self.aim_mgr.create(self.aim_ctx, bd)
self._test_unrouted_network(preexisting_bd=bd)
@ -493,9 +522,10 @@ class TestNeutronMapping(AimValidationTestCase):
# REVISIT: We should consider supporting configuration file
# mappings of pre-existing BDs.
if not preexisting_bd:
(self.db_session.query(ext_db.NetworkExtensionDb).
filter_by(network_id=net_id).
delete())
with self.db_session.begin():
(self.db_session.query(ext_db.NetworkExtensionDb).
filter_by(network_id=net_id).
delete())
self._validate_repair_validate()
self._test_network_attrs(net)
@ -503,7 +533,9 @@ class TestNeutronMapping(AimValidationTestCase):
# Create AIM HostDomainMappingV2.
hd_mapping = aim_infra.HostDomainMappingV2(
host_name='*', domain_name='vm2', domain_type='OpenStack')
self.aim_mgr.create(self.aim_ctx, hd_mapping)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.create(self.aim_ctx, hd_mapping)
# Create external network.
kwargs = {'router:external': True,
@ -577,23 +609,27 @@ class TestNeutronMapping(AimValidationTestCase):
def test_preexisting_external_network(self):
# Create pre-existing AIM VRF.
vrf = aim_resource.VRF(tenant_name='common', name='v1', monitored=True)
self.aim_mgr.create(self.aim_ctx, vrf)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.create(self.aim_ctx, vrf)
# Create pre-existing AIM L3Outside.
l3out = aim_resource.L3Outside(
tenant_name='common', name='l1', vrf_name='v1', monitored=True)
self.aim_mgr.create(self.aim_ctx, l3out)
# Create pre-existing AIM L3Outside.
l3out = aim_resource.L3Outside(
tenant_name='common', name='l1', vrf_name='v1', monitored=True)
self.aim_mgr.create(self.aim_ctx, l3out)
# Create pre-existing AIM ExternalNetwork.
ext_net = aim_resource.ExternalNetwork(
tenant_name='common', l3out_name='l1', name='n1', monitored=True)
self.aim_mgr.create(self.aim_ctx, ext_net)
# Create pre-existing AIM ExternalNetwork.
ext_net = aim_resource.ExternalNetwork(
tenant_name='common', l3out_name='l1', name='n1',
monitored=True)
self.aim_mgr.create(self.aim_ctx, ext_net)
# Create pre-existing AIM ExternalSubnet.
ext_sn = aim_resource.ExternalSubnet(
tenant_name='common', l3out_name='l1', external_network_name='n1',
cidr='0.0.0.0/0', monitored=True)
self.aim_mgr.create(self.aim_ctx, ext_sn)
# Create pre-existing AIM ExternalSubnet.
ext_sn = aim_resource.ExternalSubnet(
tenant_name='common', l3out_name='l1',
external_network_name='n1',
cidr='0.0.0.0/0', monitored=True)
self.aim_mgr.create(self.aim_ctx, ext_sn)
# Run tests.
net = self._test_external_network(vrf_name='v1')
@ -601,16 +637,17 @@ class TestNeutronMapping(AimValidationTestCase):
# Delete network extension data and clear ExternalNetwork
# contracts to test migration use case.
(self.db_session.query(ext_db.NetworkExtensionDb).
filter_by(network_id=net_id).
delete())
(self.db_session.query(ext_db.NetworkExtensionCidrDb).
filter_by(network_id=net_id).
delete())
self.aim_mgr.update(
self.aim_ctx, ext_net,
provided_contract_names=[],
consumed_contract_names=[])
with self.db_session.begin():
(self.db_session.query(ext_db.NetworkExtensionDb).
filter_by(network_id=net_id).
delete())
(self.db_session.query(ext_db.NetworkExtensionCidrDb).
filter_by(network_id=net_id).
delete())
self.aim_mgr.update(
self.aim_ctx, ext_net,
provided_contract_names=[],
consumed_contract_names=[])
# Test without DN for migration.
self._validate_unrepairable()
@ -634,11 +671,15 @@ class TestNeutronMapping(AimValidationTestCase):
self._test_network_attrs(net)
# Delete pre-existing AIM VRF and test.
self.aim_mgr.delete(self.aim_ctx, vrf)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, vrf)
self._validate_unrepairable()
# Replace pre-existing AIM VRF and test.
self.aim_mgr.create(self.aim_ctx, vrf)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.create(self.aim_ctx, vrf)
self._validate()
# REVISIT: Missing AIM L3Outsides, ExternalNetworks, and
@ -658,16 +699,22 @@ class TestNeutronMapping(AimValidationTestCase):
# that might break existing use cases.
# Delete pre-existing AIM L3Outside and test.
self.aim_mgr.delete(self.aim_ctx, l3out)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, l3out)
self._validate_repair_validate()
# Delete pre-existing AIM ExternalNetwork, along with its
# child ExternalSubnet, and test.
self.aim_mgr.delete(self.aim_ctx, ext_net, cascade=True)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, ext_net, cascade=True)
self._validate_repair_validate()
# Delete just the pre-existing AIM ExternalSubnet and test.
self.aim_mgr.delete(self.aim_ctx, ext_sn)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, ext_sn)
self._validate_repair_validate()
def test_svi_network(self):
@ -782,9 +829,10 @@ class TestNeutronMapping(AimValidationTestCase):
self._test_aim_resource(esn, 'cidr', '1.2.3.4/0')
# Delete the CloneL3Out record and test.
(self.db_session.query(aim_lib_model.CloneL3Out).
filter_by(tenant_name=tenant_name, name=l3out_name).
delete())
with self.db_session.begin():
(self.db_session.query(aim_lib_model.CloneL3Out).
filter_by(tenant_name=tenant_name, name=l3out_name).
delete())
self._validate_repairable_scoped(["network"], None)
self._validate_repair_validate()
@ -1030,18 +1078,21 @@ class TestNeutronMapping(AimValidationTestCase):
# Change port binding level to unknown mechanism driver and
# test.
self.db_session.query(ml2_models.PortBindingLevel).filter_by(
port_id=port['id'], level=0).update({'driver': 'xxx'})
with self.db_session.begin():
self.db_session.query(ml2_models.PortBindingLevel).filter_by(
port_id=port['id'], level=0).update({'driver': 'xxx'})
self._validate_repair_validate()
# Change port binding level to incorrect host and test.
self.db_session.query(ml2_models.PortBindingLevel).filter_by(
port_id=port['id'], level=0).update({'host': 'yyy'})
with self.db_session.begin():
self.db_session.query(ml2_models.PortBindingLevel).filter_by(
port_id=port['id'], level=0).update({'host': 'yyy'})
self._validate_repair_validate()
# Change port binding level to null segment ID and test.
self.db_session.query(ml2_models.PortBindingLevel).filter_by(
port_id=port['id'], level=0).update({'segment_id': None})
with self.db_session.begin():
self.db_session.query(ml2_models.PortBindingLevel).filter_by(
port_id=port['id'], level=0).update({'segment_id': None})
self._validate_repair_validate()
# Change port binding level to unknown mechanism driver, set
@ -1051,11 +1102,12 @@ class TestNeutronMapping(AimValidationTestCase):
# dynamic segment whenever there is no agent on the port's
# host, which is probably wrong, but it does fail to bind, so
# this test succeeds.
self.db_session.query(ml2_models.PortBindingLevel).filter_by(
port_id=port['id'], level=0).update({'driver': 'xxx',
'host': 'yyy'})
self.db_session.query(ml2_models.PortBinding).filter_by(
port_id=port['id']).update({'host': 'yyy'})
with self.db_session.begin():
self.db_session.query(ml2_models.PortBindingLevel).filter_by(
port_id=port['id'], level=0).update({'driver': 'xxx',
'host': 'yyy'})
self.db_session.query(ml2_models.PortBinding).filter_by(
port_id=port['id']).update({'host': 'yyy'})
self._validate_fails_binding_ports()
def test_erspan_ports(self):
@ -1076,10 +1128,12 @@ class TestNeutronMapping(AimValidationTestCase):
host1_dn = 'topology/pod-1/protpaths-101-102/pathep-[%s]' % host1_pg
self.hlink1 = aim_infra.HostLink(
host_name='host1', interface_name='eth0', path=host1_dn)
self.aim_mgr.create(self.aim_ctx, self.hlink1)
acc_bundle = aim_resource.InfraAccBundleGroup(name=host1_pg,
monitored=True)
self.aim_mgr.create(self.aim_ctx, acc_bundle)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.create(self.aim_ctx, self.hlink1)
acc_bundle = aim_resource.InfraAccBundleGroup(name=host1_pg,
monitored=True)
self.aim_mgr.create(self.aim_ctx, acc_bundle)
# Add ERSPAN session and verify that it validates.
erspan_config = {'apic:erspan_config': [
@ -1092,9 +1146,11 @@ class TestNeutronMapping(AimValidationTestCase):
# Delete source group from AIM, and verify that it
# can be repaired.
source_groups = self.aim_mgr.find(self.aim_ctx,
aim_resource.SpanVsourceGroup)
self.aim_mgr.delete(self.aim_ctx, source_groups[0])
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
source_groups = self.aim_mgr.find(self.aim_ctx,
aim_resource.SpanVsourceGroup)
self.aim_mgr.delete(self.aim_ctx, source_groups[0])
self._validate_repair_validate()
def test_network_scope(self):
@ -1117,17 +1173,18 @@ class TestNeutronMapping(AimValidationTestCase):
epg_dn = net['apic:distinguished_names']['EndpointGroup']
# Delete the network's mapping record and test.
(self.db_session.query(db.NetworkMapping).
filter_by(network_id=net_id).
delete())
with self.db_session.begin():
(self.db_session.query(db.NetworkMapping).
filter_by(network_id=net_id).
delete())
# delete BridgeDomain.
bd = aim_resource.BridgeDomain.from_dn(bd_dn)
self.aim_mgr.delete(self.aim_ctx, bd)
# delete BridgeDomain.
bd = aim_resource.BridgeDomain.from_dn(bd_dn)
self.aim_mgr.delete(self.aim_ctx, bd)
# delete EndpointGroup.
epg = aim_resource.EndpointGroup.from_dn(epg_dn)
self.aim_mgr.delete(self.aim_ctx, epg)
# delete EndpointGroup.
epg = aim_resource.EndpointGroup.from_dn(epg_dn)
self.aim_mgr.delete(self.aim_ctx, epg)
# self._validate_scoped(["router"], None)
self._validate_repair_validate_scoped(["network"], None)
@ -1136,7 +1193,9 @@ class TestNeutronMapping(AimValidationTestCase):
# setting scope to security group but
# should validate common tenant resources
tenant = aim_resource.Tenant(name='common')
self.aim_mgr.delete(self.aim_ctx, tenant)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, tenant)
self._validate_repair_validate_scoped(["security_group"], None)
net_resp1 = self._make_network(
@ -1146,11 +1205,15 @@ class TestNeutronMapping(AimValidationTestCase):
epg_dn1 = net1['apic:distinguished_names']['EndpointGroup']
bd1 = aim_resource.BridgeDomain.from_dn(bd_dn1)
self.aim_mgr.delete(self.aim_ctx, bd1)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, bd1)
# delete EndpointGroup.
epg1 = aim_resource.EndpointGroup.from_dn(epg_dn1)
self.aim_mgr.delete(self.aim_ctx, epg1)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, epg1)
net_resp2 = self._make_network(
self.fmt, 'net2', True, tenant_id='ten_2')
@ -1159,11 +1222,15 @@ class TestNeutronMapping(AimValidationTestCase):
epg_dn2 = net2['apic:distinguished_names']['EndpointGroup']
bd2 = aim_resource.BridgeDomain.from_dn(bd_dn2)
self.aim_mgr.delete(self.aim_ctx, bd2)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, bd2)
# delete EndpointGroup.
epg2 = aim_resource.EndpointGroup.from_dn(epg_dn2)
self.aim_mgr.delete(self.aim_ctx, epg2)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, epg2)
self._validate_repair_validate_scoped(None, ['prj_ten_1'])
self._validate_repair_validate_scoped(None, ['prj_ten_2'])
@ -1184,14 +1251,18 @@ class TestNeutronMapping(AimValidationTestCase):
aim_sg = aim_resource.SecurityGroup(
name=sg_name, tenant_name=tenant_name)
self._test_aim_resource(aim_sg)
self.aim_mgr.delete(self.aim_ctx, aim_sg)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, aim_sg)
# Test the AIM SecurityGroupSubject.
aim_subject = aim_resource.SecurityGroupSubject(
name='default', security_group_name=sg_name,
tenant_name=tenant_name)
self._test_aim_resource(aim_subject)
self.aim_mgr.delete(self.aim_ctx, aim_subject)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, aim_subject)
# Test the AIM SecurityGroupRule.
aim_rule = aim_resource.SecurityGroupRule(
@ -1200,11 +1271,15 @@ class TestNeutronMapping(AimValidationTestCase):
security_group_name=sg_name,
tenant_name=tenant_name)
self._test_aim_resource(aim_rule)
self.aim_mgr.delete(self.aim_ctx, aim_rule)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, aim_rule)
aim_tenant = aim_resource.Tenant(name=tenant_name)
self._test_aim_resource(aim_tenant)
self.aim_mgr.delete(self.aim_ctx, aim_tenant)
# TODO(pulkit): replace with AIM writer context once API supports it.
with self.db_session.begin():
self.aim_mgr.delete(self.aim_ctx, aim_tenant)
self._validate_repair_validate_scoped(None, [tenant_name])

View File

@ -1,5 +1,5 @@
[tox]
envlist = py36,py37,pep8
envlist = py36,py37,pep8,py38
minversion = 3.2.0
skipsdist = False
ignore_basepython_conflict = True