From 978b13f548ffdfbf2e5ee525e005b0842ecd4ae1 Mon Sep 17 00:00:00 2001
From: Kobi Samoray
Date: Mon, 12 Oct 2020 17:43:02 +0300
Subject: [PATCH] Address various issues with Octavia delete cascade

Cascaded loadbalancer deletion with Octavia was broken in various ways:
some issues are NSXV specific, while others are in the common code.

Change-Id: Id5148b6d496645c49fccb50e33d6a371e98bbb85
---
 .../lbaas/nsx_v/implementation/member_mgr.py  |  6 ++-
 .../lbaas/nsx_v/implementation/pool_mgr.py    | 19 ++++----
 .../lbaas/octavia/octavia_listener.py         | 47 +++++++++++++------
 3 files changed, 46 insertions(+), 26 deletions(-)

diff --git a/vmware_nsx/services/lbaas/nsx_v/implementation/member_mgr.py b/vmware_nsx/services/lbaas/nsx_v/implementation/member_mgr.py
index e2fbeadf1f..a99db4fe02 100644
--- a/vmware_nsx/services/lbaas/nsx_v/implementation/member_mgr.py
+++ b/vmware_nsx/services/lbaas/nsx_v/implementation/member_mgr.py
@@ -38,11 +38,13 @@ class EdgeMemberManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager):
     def _get_pool_lb_id(self, member):
         if not member.get('pool'):
             return
-        listener = member['pool']['listener']
+        listener = member['pool'].get('listener')
         if listener:
             lb_id = listener['loadbalancer_id']
+        elif member['pool'].get('loadbalancer_id'):
+            lb_id = member['pool']['loadbalancer_id']
         else:
-            lb_id = member['pool']['loadbalancer']['id']
+            lb_id = member['pool'].get('loadbalancer', {}).get('id')
         return lb_id
 
     def _get_pool_member_ips(self, pool, operation, address):
diff --git a/vmware_nsx/services/lbaas/nsx_v/implementation/pool_mgr.py b/vmware_nsx/services/lbaas/nsx_v/implementation/pool_mgr.py
index 1643938e6f..7988594e13 100644
--- a/vmware_nsx/services/lbaas/nsx_v/implementation/pool_mgr.py
+++ b/vmware_nsx/services/lbaas/nsx_v/implementation/pool_mgr.py
@@ -174,15 +174,16 @@ class EdgePoolManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager):
                 listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding(
                     context.session, lb_id, listener['id'])
-                vse = listener_mgr.listener_to_edge_vse(
-                    context,
-                    listener,
-                    lb_binding['vip_address'],
-                    None,
-                    listener_binding['app_profile_id'])
-                with locking.LockManager.get_lock(edge_id):
-                    self.vcns.update_vip(
-                        edge_id, listener_binding['vse_id'], vse)
+                if listener_binding:
+                    vse = listener_mgr.listener_to_edge_vse(
+                        context,
+                        listener,
+                        lb_binding['vip_address'],
+                        None,
+                        listener_binding['app_profile_id'])
+                    with locking.LockManager.get_lock(edge_id):
+                        self.vcns.update_vip(
+                            edge_id, listener_binding['vse_id'], vse)
             self.vcns.delete_pool(edge_id, edge_pool_id)
             completor(success=True)
             nsxv_db.del_nsxv_lbaas_pool_binding(
diff --git a/vmware_nsx/services/lbaas/octavia/octavia_listener.py b/vmware_nsx/services/lbaas/octavia/octavia_listener.py
index be9494e5de..b58ef68085 100644
--- a/vmware_nsx/services/lbaas/octavia/octavia_listener.py
+++ b/vmware_nsx/services/lbaas/octavia/octavia_listener.py
@@ -324,26 +324,43 @@ class NSXOctaviaListenerEndpoint(object):
         def dummy_completor(success=True):
             pass
 
+        completor = self.get_completor_func(constants.LOADBALANCERS,
+                                            loadbalancer, delete=True)
+
+        listener_dict = {}
         # Go over the LB tree and delete one by one using the cascade
         # api implemented for each resource
-        for listener in loadbalancer.get('listeners', []):
-            for policy in listener.get('l7policies', []):
-                for rule in policy.get('rules', []):
-                    self.l7rule.delete_cascade(ctx, rule, dummy_completor)
-                self.l7policy.delete_cascade(ctx, policy, dummy_completor)
-            self.listener.delete_cascade(ctx, listener, dummy_completor)
-        for pool in loadbalancer.get('pools', []):
-            for member in pool.get('members', []):
-                self.member.delete_cascade(ctx, member, dummy_completor)
-            if pool.get('healthmonitor'):
-                self.healthmonitor.delete_cascade(
-                    ctx, pool['healthmonitor'], dummy_completor)
-            self.pool.delete_cascade(ctx, pool, dummy_completor)
+        try:
+            for listener in loadbalancer.get('listeners', []):
+                listener['loadbalancer'] = loadbalancer
+                listener_dict[listener['id']] = listener
+                for policy in listener.get('l7policies', []):
+                    for rule in policy.get('rules', []):
+                        self.l7rule.delete_cascade(ctx, rule, dummy_completor)
+                    self.l7policy.delete_cascade(ctx, policy, dummy_completor)
+                self.listener.delete_cascade(ctx, listener, dummy_completor)
+            for pool in loadbalancer.get('pools', []):
+                if not pool.get('loadbalancer'):
+                    pool['loadbalancer'] = loadbalancer
+                if pool.get('listener_id'):
+                    pool['listener'] = listener_dict[pool['listener_id']]
+                    pool['listeners'] = [pool['listener']]
+                for member in pool.get('members', []):
+                    if not member.get('pool'):
+                        member['pool'] = pool
+                    self.member.delete_cascade(ctx, member, dummy_completor)
+                if pool.get('healthmonitor'):
+                    self.healthmonitor.delete_cascade(
+                        ctx, pool['healthmonitor'], dummy_completor)
+                self.pool.delete_cascade(ctx, pool, dummy_completor)
+        except Exception as e:
+            LOG.error('NSX driver loadbalancer_delete_cascade failed to '
+                      'delete sub-object %s', e)
+            completor(success=False)
+            return False
 
         # Delete the loadbalancer itself with the completor that marks all
         # as deleted
-        completor = self.get_completor_func(constants.LOADBALANCERS,
-                                            loadbalancer, delete=True)
         try:
             self.loadbalancer.delete_cascade(
                 ctx, loadbalancer, self.get_completor_func(
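
Reviewer note: the octavia_listener.py hunk above reduces to three ideas:
fill in the back-references ('loadbalancer', 'listener', 'pool') that the
Octavia cascade payload may omit, delete children before parents while
reporting them into a no-op completor, and fail the whole operation through
the real completor if any child delete raises. Below is a minimal standalone
sketch of that pattern, trimmed to pools and members; FakeResourceManager,
delete_lb_tree, and the toy payload are hypothetical illustration names, not
the driver's API.

def dummy_completor(success=True):
    # Child deletions report into a no-op completor; only the final
    # loadbalancer deletion drives the real status update.
    pass


class FakeResourceManager(object):
    """Hypothetical stand-in for self.member, self.pool, etc."""
    def __init__(self, name):
        self.name = name

    def delete_cascade(self, ctx, obj, completor):
        print('deleting %s %s' % (self.name, obj.get('id')))
        completor(success=True)


def delete_lb_tree(ctx, loadbalancer, member_mgr, pool_mgr, completor):
    # Children first, parents last, mirroring the ordering in the patch.
    try:
        for pool in loadbalancer.get('pools', []):
            if not pool.get('loadbalancer'):
                # The cascade payload may omit the back-reference.
                pool['loadbalancer'] = loadbalancer
            for member in pool.get('members', []):
                if not member.get('pool'):
                    member['pool'] = pool
                member_mgr.delete_cascade(ctx, member, dummy_completor)
            pool_mgr.delete_cascade(ctx, pool, dummy_completor)
    except Exception:
        # Any failure marks the whole operation as failed, as in the patch.
        completor(success=False)
        return False
    completor(success=True)
    return True


if __name__ == '__main__':
    lb = {'id': 'lb1',
          'pools': [{'id': 'pool1', 'members': [{'id': 'member1'}]}]}
    delete_lb_tree(None, lb,
                   FakeResourceManager('member'),
                   FakeResourceManager('pool'),
                   lambda success=True: print('lb delete ok:', success))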