[AIM] Handle the keystone notification project.deleted event

1. Add a testing script to exercise the 'gbp purge <tenant ID>' CLI.
2. Also add a config parameter to enable/disable this behavior.

Change-Id: Ic929071564f13027fc5d2bc85589e1614b276064
This commit is contained in:
Kent Wu
2017-03-10 18:25:11 -08:00
parent a6276d4f2c
commit 131bf0fbf4
7 changed files with 193 additions and 32 deletions

View File

@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from gbpclient.v2_0 import client as gbp_client
from keystoneclient import auth as ksc_auth
from keystoneclient import session as ksc_session
from keystoneclient.v3 import client as ksc_client
@@ -39,6 +40,7 @@ class ProjectNameCache(object):
def __init__(self):
    """Initialize an empty cache with no identity clients yet."""
    # project_id -> project name; filled lazily on first lookup.
    self.project_names = {}
    # Keystone and GBP clients are created lazily by
    # _get_keystone_client(); None means "not yet connected".
    self.keystone = None
    self.gbp = None
def _get_keystone_client(self):
LOG.debug("Getting keystone client")
@@ -51,7 +53,9 @@ class ProjectNameCache(object):
cfg.CONF, AUTH_GROUP, auth=auth)
LOG.debug("Got session: %s", session)
self.keystone = ksc_client.Client(session=session)
LOG.debug("Got client: %s", self.keystone)
LOG.debug("Got keystone client: %s", self.keystone)
self.gbp = gbp_client.Client(session=session)
LOG.debug("Got gbp client: %s", self.gbp)
def ensure_project(self, project_id):
"""Ensure cache contains mapping for project.
@@ -103,3 +107,10 @@ class ProjectNameCache(object):
self.project_names[project.id] = project.name
return project.name
return None
def purge_gbp(self, project_id):
    """Purge all GBP-owned resources belonging to *project_id*.

    The GBP client is created lazily (together with the keystone
    client) on first use.  This is best-effort: if the client still
    cannot be created, the purge is skipped, but we now log a warning
    instead of silently returning so operators can see the leak.
    """
    if self.gbp is None:
        # Creates both self.keystone and self.gbp from one session.
        self._get_keystone_client()
    if self.gbp:
        LOG.debug("Calling gbp purge() API for project %s", project_id)
        self.gbp.purge(project_id)
    else:
        # NOTE(review): previously this path was silent; resources of
        # the deleted project are intentionally left behind, but the
        # condition is now visible in the logs.
        LOG.warning("GBP client unavailable; skipping purge for "
                    "project %s", project_id)

View File

@@ -36,6 +36,11 @@ apic_opts = [
help=("Number of seconds for the optimized DHCP lease time. "
"Default is 0 which means using opflex agent's default "
"value.")),
cfg.BoolOpt('enable_keystone_notification_purge',
default=False,
help=("This will enable purging all the resources including "
"the tenant once a keystone project.deleted "
"notification is received.")),
]

View File

@@ -88,7 +88,7 @@ NO_ADDR_SCOPE = object()
class KeystoneNotificationEndpoint(object):
# NOTE: use a regex group, not a character class. The previous pattern
# '^identity.project.[updated|deleted]' matched any SINGLE character
# from the set {u,p,d,a,t,e,|,l}, so unrelated events such as
# 'identity.project.pinned' would also pass the filter (the info()
# handler still checks event_type explicitly, so this only tightens
# the filter). Dots are escaped because event_type is a regex.
filter_rule = oslo_messaging.NotificationFilter(
    event_type=r'^identity\.project\.(updated|deleted)$')
def __init__(self, mechanism_driver):
self._driver = mechanism_driver
@@ -101,24 +101,44 @@ class KeystoneNotificationEndpoint(object):
if not tenant_id:
return None
new_project_name = (self._driver.project_name_cache.
update_project_name(tenant_id))
if not new_project_name:
return None
if event_type == 'identity.project.updated':
new_project_name = (self._driver.project_name_cache.
update_project_name(tenant_id))
if not new_project_name:
return None
# we only update tenants which have been created in APIC. For other
# cases, their nameAlias will be set when the first resource is being
# created under that tenant
session = db_api.get_session()
tenant_aname = self._driver.name_mapper.project(session, tenant_id)
aim_ctx = aim_context.AimContext(session)
tenant = aim_resource.Tenant(name=tenant_aname)
if not self._driver.aim.get(aim_ctx, tenant):
return None
# we only update tenants which have been created in APIC. For other
# cases, their nameAlias will be set when the first resource is
# being created under that tenant
session = db_api.get_session()
tenant_aname = self._driver.name_mapper.project(session, tenant_id)
aim_ctx = aim_context.AimContext(session)
tenant = aim_resource.Tenant(name=tenant_aname)
if not self._driver.aim.get(aim_ctx, tenant):
return None
self._driver.aim.update(aim_ctx, tenant,
display_name=aim_utils.sanitize_display_name(new_project_name))
return oslo_messaging.NotificationResult.HANDLED
self._driver.aim.update(aim_ctx, tenant,
display_name=aim_utils.sanitize_display_name(new_project_name))
return oslo_messaging.NotificationResult.HANDLED
if event_type == 'identity.project.deleted':
if not self._driver.enable_keystone_notification_purge:
return None
self.tenant = tenant_id
self._driver.project_name_cache.purge_gbp(self)
# delete the tenant and AP in AIM also
session = db_api.get_session()
tenant_aname = self._driver.name_mapper.project(session, tenant_id)
aim_ctx = aim_context.AimContext(session)
ap = aim_resource.ApplicationProfile(tenant_name=tenant_aname,
name=self._driver.ap_name)
self._driver.aim.delete(aim_ctx, ap)
tenant = aim_resource.Tenant(name=tenant_aname)
self._driver.aim.delete(aim_ctx, tenant)
return oslo_messaging.NotificationResult.HANDLED
class ApicMechanismDriver(api_plus.MechanismDriver):
@@ -169,6 +189,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver):
self._setup_keystone_notification_listeners()
self.apic_optimized_dhcp_lease_time = (cfg.CONF.ml2_apic_aim.
apic_optimized_dhcp_lease_time)
self.enable_keystone_notification_purge = (cfg.CONF.ml2_apic_aim.
enable_keystone_notification_purge)
def _setup_keystone_notification_listeners(self):
targets = [oslo_messaging.Target(

View File

@@ -135,6 +135,17 @@ class ApicAimTestMixin(object):
self.plugin.create_or_update_agent(
n_context.get_admin_context(), agent)
def _check_call_list(self, expected, observed, check_all=True):
    """Assert every call in *expected* appears in *observed*.

    Each matched call is removed from *observed* (the list is
    mutated).  When check_all is True, also assert that no
    unexpected calls remain afterwards.
    """
    for expected_call in expected:
        present = expected_call in observed
        self.assertTrue(present,
                        msg='Call not found, expected:\n%s\nobserved:'
                        '\n%s' % (str(expected_call), str(observed)))
        observed.remove(expected_call)
    if not check_all:
        return
    self.assertFalse(
        len(observed),
        msg='There are more calls than expected: %s' % str(observed))
class ApicAimTestCase(test_address_scope.AddressScopeTestCase,
test_l3.L3NatTestCaseMixin, ApicAimTestMixin):
@@ -1061,15 +1072,33 @@ class TestAimMapping(ApicAimTestCase):
def test_keystone_notification_endpoint(self):
self.driver.aim.get = mock.Mock(return_value=True)
self.driver.aim.update = mock.Mock()
self.driver.aim.delete = mock.Mock()
self.driver.project_name_cache.purge_gbp = mock.Mock()
payload = {}
payload['resource_info'] = 'test-tenant'
keystone_ep = md.KeystoneNotificationEndpoint(self.driver)
keystone_ep.info(None, None, None, payload, None)
tenant_aname = self.name_mapper.project(None, 'test-tenant')
tenant = aim_resource.Tenant(name=tenant_aname)
# first test with project.updated event
keystone_ep.info(None, None, 'identity.project.updated', payload, None)
tenant_name = self.name_mapper.project(None, 'test-tenant')
tenant = aim_resource.Tenant(name=tenant_name)
self.driver.aim.update.assert_called_once_with(
mock.ANY, tenant, display_name='new_name')
# test again with project.deleted event
self.driver.enable_keystone_notification_purge = True
keystone_ep.info(None, None, 'identity.project.deleted', payload, None)
self.assertEqual(keystone_ep.tenant, 'test-tenant')
self.driver.project_name_cache.purge_gbp.assert_called_once_with(
keystone_ep)
ap = aim_resource.ApplicationProfile(tenant_name=tenant_name,
name=self.driver.ap_name)
tenant = aim_resource.Tenant(name=tenant_name)
exp_calls = [
mock.call(mock.ANY, ap),
mock.call(mock.ANY, tenant)]
self._check_call_list(exp_calls, self.driver.aim.delete.call_args_list)
# TODO(rkukura): Test IPv6 and dual stack router interfaces.
def test_shared_address_scope(self):

View File

@@ -184,17 +184,6 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
ksc_client.Client = self.saved_keystone_client
super(AIMBaseTestCase, self).tearDown()
def _check_call_list(self, expected, observed, check_all=True):
    """Assert that each call in *expected* appears in *observed*.

    Matched calls are removed from *observed* (the list is mutated
    in place).  With check_all=True, additionally assert that no
    unexpected calls remain.
    """
    for call in expected:
        self.assertTrue(call in observed,
                        msg='Call not found, expected:\n%s\nobserved:'
                        '\n%s' % (str(call), str(observed)))
        observed.remove(call)
    if check_all:
        self.assertFalse(
            len(observed),
            msg='There are more calls than expected: %s' % str(observed))
def _setup_external_network(self, name, dn=None, router_tenant=None):
DN = 'apic:distinguished_names'
kwargs = {'router:external': True}

View File

@@ -0,0 +1,104 @@
#!/usr/bin/env bash
# **gbp_purge.sh**
# Sanity-check the 'gbp purge <tenant ID>' CLI: create a representative
# set of GBP resources under the demo project, purge them all in one
# call, and verify nothing is left behind.
echo "*********************************************************************"
echo "Begin DevStack Exercise: $0"
echo "*********************************************************************"
# Settings
# ========
source functions-gbp
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Keep track of the current directory
EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
# Import common functions
source $TOP_DIR/functions
# Import configuration
source $TOP_DIR/openrc
# Import exercise configuration
source $TOP_DIR/exerciserc
source $TOP_DIR/openrc demo demo
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Create servicechain related policies
gbp service-profile-create --vendor heat_based_node_driver --insertion-mode l3 --servicetype FIREWALL fw-profile
gbp servicechain-node-create firewall-node --template-file $TOP_DIR/gbp-templates/firewall-lb-servicechain/fw.template --service-profile fw-profile
gbp servicechain-spec-create firewall-spec --description spec --nodes "firewall-node"
# NSP creation
gbp network-service-policy-create --network-service-params type=ip_single,name=vip_ip,value=self_subnet vip_ip_policy
# Create action that can used in several rules
gbp policy-action-create allow_action --action-type allow
gbp policy-action-create redirect --action-type redirect --action-value firewall-spec
# Create ICMP rule
gbp policy-classifier-create icmp-traffic --protocol icmp --direction bi
gbp policy-rule-create ping-policy-rule --classifier icmp-traffic --actions allow_action
#gbp policy-rule-create ping-policy-rule --classifier icmp-traffic --actions redirect
# ICMP policy-rule-set
gbp policy-rule-set-create icmp-policy-rule-set --policy-rules ping-policy-rule
# ====== PROJECT OPERATION ======
# PTGs creation
gbp group-create --provided-policy-rule-sets "icmp-policy-rule-set" --consumed-policy-rule-sets "icmp-policy-rule-set" --network-service-policy vip_ip_policy web
gbp group-create web1
# PT creation
gbp policy-target-create web-pt-1 --policy-target-group web
# create external network with admin privilege
source $TOP_DIR/openrc admin admin
EXT_NET_ID=$(neutron net-create mgmt_out --router:external=True --shared | grep ' id ' | awk '{print $4}')
EXT_SUBNET_ID=$(neutron subnet-create --ip_version 4 --gateway 172.16.73.1 --name public-subnet $EXT_NET_ID 172.16.73.0/24 | grep ' id ' | awk '{print $4}')
openstack project list
# NOTE(review): 'grep id' also matches rows such as 'domain_id' and
# 'is_domain' -- consider "grep ' id '" as used for the neutron IDs
# above; verify the awk field is stable across client versions.
DEMO_PROJECT_ID=$(openstack project show demo | grep id | awk '{print $4}')
source $TOP_DIR/openrc demo demo
# ES creation
gbp external-segment-create --ip-version 4 --external-route destination=0.0.0.0/0,nexthop=172.16.73.1 --subnet_id=$EXT_SUBNET_ID --cidr 50.50.50.0/24 mgmt_out
gbp l3policy-update --external-segment mgmt_out default
# Nat pool creation
gbp nat-pool-create --ip-version 4 --ip-pool 60.60.60.0/24 --external-segment mgmt_out ext_nat_pool
# External policy creation
gbp external-policy-create --external-segment mgmt_out --provided-policy-rule-sets "icmp-policy-rule-set" --consumed-policy-rule-sets "icmp-policy-rule-set" ext_pol
# purge all the resources, then purge again: the second run must report
# that the tenant has no supported resources left, proving the first
# purge removed everything.
gbp purge $DEMO_PROJECT_ID
PURGE_OUTPUT=$(gbp purge $DEMO_PROJECT_ID | grep 'Tenant has no supported resources')
die_if_not_set $LINENO PURGE_OUTPUT "Failure purging GBP resources"
# delete the neutron resources too
source $TOP_DIR/openrc admin admin
neutron subnet-delete public-subnet
neutron net-delete mgmt_out
check_residual_resources demo demo
check_residual_resources admin admin
set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End DevStack Exercise: $0"
echo "*********************************************************************"

View File

@@ -17,6 +17,7 @@ function prepare_gbp_devstack {
cd $TOP_DIR
sudo git checkout stable/newton
sudo cp $CONTRIB_DIR/devstack/local.conf $TOP_DIR/local.conf
sudo cp $CONTRIB_DIR/functions-gbp $TOP_DIR/exercises/
sudo cp $CONTRIB_DIR/devstack/exercises/*.sh $TOP_DIR/exercises/
sudo cp $CONTRIB_DIR/devstack/lib/* $TOP_DIR/lib/
sudo cp -r $CONTRIB_DIR/devstack/gbp-templates $TOP_DIR