[AIM] Fix legacy SNAT migration

Avoid issues caused by missing NetworkMapping DB records when cleaning
up the legacy plugin's SNAT-related resources. The test_legacy_cleanup
unit test is enhanced to create a router and an internal network with a
router interface, and to delete all mapping and extension records, so
that the code paths that manipulate legacy SNAT resources with missing
mapping records are exercised.

Missing NetworkMapping and AddressScopeMapping records are now silently
ignored in the delete_*_precommit methods, and are logged and then
ignored in the extend_*_dict methods.
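
In sketch form (a simplified, self-contained stand-in with made-up
names, not the driver's real method signatures), the two policies look
roughly like this:

    import logging

    logging.basicConfig()
    LOG = logging.getLogger(__name__)

    def cleanup_network(mappings, network_id):
        # delete_*_precommit style: a missing NetworkMapping record
        # just means there is nothing to clean up, so skip silently.
        mapping = mappings.get(network_id)
        if mapping:
            del mappings[network_id]

    def extend_network_dict(network_id, aim_mapping, result):
        # extend_*_dict style: log a warning, then skip the fields
        # that would be derived from the mapping.
        if not aim_mapping:
            LOG.warning("Mapping missing for network %s", network_id)
            return result
        # Placeholder for the AIM-derived fields the real driver adds.
        result['aim_mapping_found'] = True
        return result

    mappings = {}                                  # no mapping records
    cleanup_network(mappings, 'net-1')             # silently ignored
    print(extend_network_dict('net-1', None, {}))  # warns, returns {}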

When an AddressScopeMapping or a NetworkMapping record is added, the
corresponding AddressScope or Network instance's aim_mapping
relationship field is updated to reference it. This avoids aim_mapping
being None when the AddressScope or Network instance is subsequently
retrieved from the session cache within the same transaction.
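
The following self-contained sketch (generic SQLAlchemy 1.4+ models,
not the actual Neutron Network or NetworkMapping classes) shows why
the assignment matters: once a relationship has been loaded on an
instance held in the session's identity map, a later INSERT of the
related row is not visible on that instance until it is expired or
the relationship is set explicitly.

    from sqlalchemy import Column, ForeignKey, String, create_engine
    from sqlalchemy.orm import Session, declarative_base, relationship

    Base = declarative_base()

    class Network(Base):
        __tablename__ = 'networks'
        id = Column(String(36), primary_key=True)
        aim_mapping = relationship('NetworkMapping', uselist=False)

    class NetworkMapping(Base):
        __tablename__ = 'network_mappings'
        network_id = Column(String(36), ForeignKey('networks.id'),
                            primary_key=True)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = Session(engine)

    session.add(Network(id='net-1'))
    session.flush()

    net = session.get(Network, 'net-1')
    print(net.aim_mapping)   # None; relationship now loaded as empty

    mapping = NetworkMapping(network_id='net-1')
    session.add(mapping)
    session.flush()
    print(net.aim_mapping)   # still None; cached value not refreshed

    # What _add_network_mapping now does after session.add(mapping):
    net.aim_mapping = mapping
    print(session.get(Network, 'net-1').aim_mapping is mapping)  # True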

Change-Id: I84f3c401df7c79deb9151b0b232262139fca9429

Author: Robert Kukura 2018-11-29 18:20:30 -05:00
parent 49f3311eb9
commit add2c14abd
4 changed files with 140 additions and 37 deletions


@@ -74,13 +74,24 @@ class NetworkMapping(model_base.BASEV2):
 
 class DbMixin(object):
     def _add_address_scope_mapping(self, session, scope_id, vrf,
-                                   vrf_owned=True):
+                                   vrf_owned=True, update_scope=True):
         mapping = AddressScopeMapping(
             scope_id=scope_id,
             vrf_name=vrf.name,
             vrf_tenant_name=vrf.tenant_name,
             vrf_owned=vrf_owned)
         session.add(mapping)
+        if update_scope:
+            # The AddressScope instance should already be in the
+            # session cache, so this should not add another DB
+            # roundtrip. It needs to be updated in case something
+            # within the same transaction tries to access its
+            # aim_mapping relationship after retrieving the
+            # AddressScope record from the session cache.
+            scope = (session.query(as_db.AddressScope).
+                     filter_by(id=scope_id).
+                     one_or_none())
+            scope.aim_mapping = mapping
         return mapping
 
     def _get_address_scope_mapping(self, session, scope_id):
@@ -111,7 +122,7 @@ class DbMixin(object):
             name=mapping.vrf_name)
 
     def _add_network_mapping(self, session, network_id, bd, epg, vrf,
-                             ext_net=None):
+                             ext_net=None, update_network=True):
         if not ext_net:
             mapping = NetworkMapping(
                 network_id=network_id,
@@ -131,6 +142,17 @@ class DbMixin(object):
                 vrf_name=vrf.name,
                 vrf_tenant_name=vrf.tenant_name)
         session.add(mapping)
+        if update_network:
+            # The Network instance should already be in the session
+            # cache, so this should not add another DB roundtrip. It
+            # needs to be updated in case something within the same
+            # transaction tries to access its aim_mapping relationship
+            # after retrieving the Network record from the session
+            # cache.
+            net = (session.query(models_v2.Network).
+                   filter_by(id=network_id).
+                   one_or_none())
+            net.aim_mapping = mapping
         return mapping
 
     def _get_network_mapping(self, session, network_id):
@@ -173,6 +195,12 @@ class DbMixin(object):
 
     def _get_network_l3out(self, mapping):
         if not mapping:
+            # REVISIT: Is this still needed now that
+            # _add_network_mapping updates the Network instance's
+            # aim_mapping? If so, the test should probably be moved to
+            # the caller to make all these
+            # _get_<neutron-resource>_<aim-resource> methods more
+            # consistent.
             return None
         return aim_resource.L3Outside(
             tenant_name=mapping.l3out_tenant_name,


@@ -803,16 +803,18 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
             # Before we can clean up the default vrf, we have to
             # remove the association in the network_mapping first.
             mapping = self._get_network_mapping(session, current['id'])
-            self._set_network_vrf(mapping, self._map_unrouted_vrf())
+            if mapping:
+                self._set_network_vrf(mapping, self._map_unrouted_vrf())
             vrf = self._map_default_vrf(session, current)
             self._cleanup_default_vrf(aim_ctx, vrf)
         else:
             mapping = self._get_network_mapping(session, current['id'])
-            bd = self._get_network_bd(mapping)
-            self.aim.delete(aim_ctx, bd)
-            epg = self._get_network_epg(mapping)
-            self.aim.delete(aim_ctx, epg)
-            session.delete(mapping)
+            if mapping:
+                bd = self._get_network_bd(mapping)
+                self.aim.delete(aim_ctx, bd)
+                epg = self._get_network_epg(mapping)
+                self.aim.delete(aim_ctx, epg)
+                session.delete(mapping)
 
     def extend_network_dict_bulk(self, session, results, single=False):
         # Gather db objects
@@ -1135,7 +1137,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
         aim_ctx = aim_context.AimContext(session)
 
         mapping = self._get_address_scope_mapping(session, current['id'])
-        if mapping.vrf_owned:
+        if mapping and mapping.vrf_owned:
             vrf = self._get_address_scope_vrf(mapping)
             session.delete(mapping)
             scopes = self._get_address_scopes_owning_vrf(session, vrf)
@@ -1349,6 +1351,11 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
                            l3_db.RouterPort.port_type ==
                            n_constants.DEVICE_OWNER_ROUTER_INTF)):
             ip_address, subnet_db, network_db = intf
 
+            if not network_db.aim_mapping:
+                LOG.warning(
+                    "Mapping missing for network %s in extend_router_dict" %
+                    network_db.id)
+                continue
             if network_db.aim_mapping.bd_name:
                 bd = self._get_network_bd(network_db.aim_mapping)
@@ -1374,6 +1381,12 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
 
         for scope_id in scope_ids:
             scope_db = self._scope_by_id(session, scope_id)
+            if not scope_db.aim_mapping:
+                LOG.warning(
+                    "Mapping missing for address scope %s in "
+                    "extend_router_dict" % scope_db.id)
+                continue
             vrf = self._get_address_scope_vrf(scope_db.aim_mapping)
             dist_names[a_l3.SCOPED_VRF % scope_id] = vrf.dn
             sync_state = self._merge_status(aim_ctx, sync_state, vrf)
@@ -1546,9 +1559,6 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
         # interfaces.
         if not net_intfs:
             # First interface for network.
-            network_db.aim_mapping = (network_db.aim_mapping or
-                                      self._get_network_mapping(session,
-                                                                network_db.id))
             if network_db.aim_mapping.epg_name:
                 bd, epg = self._associate_network_with_vrf(
                     context, aim_ctx, network_db, vrf, nets_to_notify)
@@ -4232,7 +4242,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
             else:
                 vrf = self._map_address_scope(mgr.expected_session, scope_db)
                 mapping = self._add_address_scope_mapping(
-                    mgr.expected_session, scope_db.id, vrf)
+                    mgr.expected_session, scope_db.id, vrf, update_scope=False)
             vrf = self._get_address_scope_vrf(mapping)
             vrf.monitored = not mapping.vrf_owned
             vrf.display_name = (
@@ -4363,7 +4373,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
         # Expect NetworkMapping record if applicable.
         if bd or epg or vrf or ext_net:
             self._add_network_mapping(
-                mgr.expected_session, net_db.id, bd, epg, vrf, ext_net)
+                mgr.expected_session, net_db.id, bd, epg, vrf, ext_net,
+                update_network=False)
 
     def _get_router_ext_contracts(self, mgr):
         # Get external contracts for routers.


@@ -13,13 +13,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-# The following are imported at the beginning to ensure
-# that the patches are applied before any of the
-# modules save a reference to the functions being patched
-from gbpservice.neutron.plugins.ml2plus import patch_neutron  # noqa
+# The following are imported at the beginning to ensure that the
+# patches are applied before any of the modules save a reference to
+# the functions being patched. The order is also important.
 from gbpservice.neutron.extensions import patch  # noqa
+from gbpservice.neutron.plugins.ml2plus import patch_neutron  # noqa
 
 from oslo_db.sqlalchemy import utils as sa_utils
 
 # REVISIT: Remove this as soon as possible.


@@ -871,6 +871,20 @@ class TestNeutronMapping(AimValidationTestCase):
         self._validate_fails_binding_ports()
 
     def test_legacy_cleanup(self):
+        # Create pre-existing AIM VRF.
+        vrf = aim_resource.VRF(tenant_name='common', name='v1', monitored=True)
+        self.aim_mgr.create(self.aim_ctx, vrf)
+
+        # Create pre-existing AIM L3Outside.
+        l3out = aim_resource.L3Outside(
+            tenant_name='common', name='l1', vrf_name='v1', monitored=True)
+        self.aim_mgr.create(self.aim_ctx, l3out)
+
+        # Create pre-existing AIM ExternalNetwork.
+        ext_net = aim_resource.ExternalNetwork(
+            tenant_name='common', l3out_name='l1', name='n1', monitored=True)
+        self.aim_mgr.create(self.aim_ctx, ext_net)
+
         # Create external network.
         kwargs = {'router:external': True,
                   'apic:distinguished_names':
@@ -880,47 +894,97 @@ class TestNeutronMapping(AimValidationTestCase):
             arg_list=self.extension_attributes, **kwargs)['network']
         ext_net_id = ext_net['id']
 
+        # Create router using external network.
+        kwargs = {'external_gateway_info': {'network_id': ext_net_id}}
+        router = self._make_router(
+            self.fmt, self._tenant_id, 'router1',
+            arg_list=self.extension_attributes, **kwargs)['router']
+        router_id = router['id']
+
+        # Create internal network and subnet.
+        int_net_resp = self._make_network(self.fmt, 'net1', True)
+        int_net = int_net_resp['network']
+        int_net_id = int_net['id']
+        int_subnet = self._make_subnet(
+            self.fmt, int_net_resp, '10.0.1.1', '10.0.1.0/24')['subnet']
+        int_subnet_id = int_subnet['id']
+
+        # Add internal subnet to router.
+        self.l3_plugin.add_router_interface(
+            n_context.get_admin_context(), router_id,
+            {'subnet_id': int_subnet_id})
+
+        # Validate just to make sure everything is OK before creating
+        # legacy plugin's SNAT-related resources.
+        self._validate()
+
         # Create legacy plugin's SNAT-related Neutron network.
-        net_resp = self._make_network(
+        leg_net_resp = self._make_network(
             self.fmt,
             'host-snat-network-for-internal-use-' + ext_net_id, False)
-        net = net_resp['network']
-        net_id = net['id']
+        leg_net = leg_net_resp['network']
+        leg_net_id = leg_net['id']
 
         # Create legacy plugin's SNAT-related Neutron subnet.
-        subnet = self._make_subnet(
-            self.fmt, net_resp, '66.66.66.1', '66.66.66.0/24',
+        leg_subnet = self._make_subnet(
+            self.fmt, leg_net_resp, '66.66.66.1', '66.66.66.0/24',
             enable_dhcp=False)['subnet']
-        subnet_id = subnet['id']
+        leg_subnet_id = leg_subnet['id']
         data = {'subnet': {'name': 'host-snat-pool-for-internal-use'}}
-        subnet = self._update('subnets', subnet_id, data)['subnet']
+        leg_subnet = self._update('subnets', leg_subnet_id, data)['subnet']
 
         # Create legacy plugin's SNAT-related Neutron port.
-        fixed_ips = [{'subnet_id': subnet_id, 'ip_address': '66.66.66.5'}]
-        port = self._make_port(
-            self.fmt, net_id, fixed_ips=fixed_ips,
+        fixed_ips = [{'subnet_id': leg_subnet_id, 'ip_address': '66.66.66.5'}]
+        leg_port = self._make_port(
+            self.fmt, leg_net_id, fixed_ips=fixed_ips,
            name='host-snat-pool-for-internal-use',
            device_owner='host-snat-pool-port-device-owner-internal-use'
        )['port']
-        port_id = port['id']
+        leg_port_id = leg_port['id']
 
-        # Test cleanup of these resources.
+        # Delete all networks' mapping and extension records to
+        # simulate migration use case.
+        net_ids = [ext_net_id, int_net_id, leg_net_id]
+        (self.db_session.query(db.NetworkMapping).
+         filter(db.NetworkMapping.network_id.in_(net_ids)).
+         delete(synchronize_session=False))
+        (self.db_session.query(ext_db.NetworkExtensionDb).
+         filter(ext_db.NetworkExtensionDb.network_id.in_(net_ids)).
+         delete(synchronize_session=False))
+
+        # Delete all subnets' extension records to simulate migration
+        # use case.
+        subnet_ids = [int_subnet_id, leg_subnet_id]
+        (self.db_session.query(ext_db.SubnetExtensionDb).
+         filter(ext_db.SubnetExtensionDb.subnet_id.in_(subnet_ids)).
+         delete(synchronize_session=False))
+
+        # Test migration.
+        cfg.CONF.set_override(
+            'migrate_ext_net_dns',
+            {ext_net_id: 'uni/tn-common/out-l1/instP-n1'},
+            group='ml2_apic_aim')
         self._validate_repair_validate()
 
         # Ensure legacy plugin's SNAT-related resources are gone.
         self._show(
-            'ports', port_id, expected_code=webob.exc.HTTPNotFound.code)
+            'ports', leg_port_id,
+            expected_code=webob.exc.HTTPNotFound.code)
         self._show(
-            'subnets', subnet_id, expected_code=webob.exc.HTTPNotFound.code)
+            'subnets', leg_subnet_id,
+            expected_code=webob.exc.HTTPNotFound.code)
         self._show(
-            'networks', net_id, expected_code=webob.exc.HTTPNotFound.code)
+            'networks', leg_net_id,
+            expected_code=webob.exc.HTTPNotFound.code)
 
         # Ensure new SNAT subnet was properly created on actual
         # external network.
         ext_subnets = self._show('networks', ext_net_id)['network']['subnets']
         self.assertEqual(1, len(ext_subnets))
         ext_subnet = self._show('subnets', ext_subnets[0])['subnet']
-        self.assertEqual(subnet['cidr'], ext_subnet['cidr'])
-        self.assertEqual(subnet['gateway_ip'], ext_subnet['gateway_ip'])
-        self.assertEqual(subnet['enable_dhcp'], ext_subnet['enable_dhcp'])
+        self.assertEqual(leg_subnet['cidr'], ext_subnet['cidr'])
+        self.assertEqual(leg_subnet['gateway_ip'], ext_subnet['gateway_ip'])
+        self.assertEqual(leg_subnet['enable_dhcp'], ext_subnet['enable_dhcp'])
         self.assertEqual('SNAT host pool', ext_subnet['name'])
         self.assertTrue(ext_subnet['apic:snat_host_pool'])
         self.assertEqual(ext_net['project_id'], ext_subnet['project_id'])