Deletion of lb pools for octavia.
The Octavia pool deletion previously checked all lb pools in the setup, so logic was added to consider only the current lb when a pool is attached to the lb directly.

Files modified: vmware_nsx_tempest_plugin/lib/feature_manager.py

Change-Id: I3ee95a2fb9ab287ace932f8f215bf20261c46cb2
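The core of the fix is a client-side filter: the pool listing is deployment-wide, so the result has to be narrowed to the loadbalancer under test before anything is deleted. A minimal standalone illustration of the filter introduced in the first hunk below, using hypothetical data shaped like the pool dicts the diff indexes into:

    # Hypothetical data, shaped like the Octavia pool listing used in the
    # diff, to show the filter this change adds.
    pools = [
        {'id': 'pool-a', 'loadbalancers': [{'id': 'lb-1'}]},
        {'id': 'pool-b', 'loadbalancers': [{'id': 'lb-2'}]},
        {'id': 'pool-c', 'loadbalancers': [{'id': 'lb-1'}]},
    ]
    lb_id = 'lb-1'

    # keep only pools that belong to the loadbalancer being torn down
    lb_pools = [pool for pool in pools
                if pool['loadbalancers'][0]['id'] == lb_id]
    print([p['id'] for p in lb_pools])   # ['pool-a', 'pool-c']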
@@ -1492,7 +1492,7 @@ class FeatureManager(traffic_manager.IperfManager,
         statuses = oc_client.show_octavia_lb_status_tree(lb_id)
         statuses = statuses.get('statuses', statuses)
         lb = statuses.get('loadbalancer')
-        for listener in lb.get('listeners', []):
+        for listener in lb.get('listeners'):
             for pool in listener.get('pools'):
                 self.delete_octavia_lb_pool_resources(lb_id, pool)
             test_utils.call_and_ignore_notfound_exc(
@@ -1500,8 +1500,10 @@ class FeatureManager(traffic_manager.IperfManager,
                 listener.get('id'))
         self.wait_for_octavia_loadbalancer_status(lb_id)
         # delete pools not attached to listener, but loadbalancer
-        lb_pools = self.octavia_admin_pools_client.\
+        tbdel = self.octavia_admin_pools_client.\
             list_octavia_pools()['pools']
+        lb_pools = [pool for pool in tbdel
+                    if pool['loadbalancers'][0]['id'] == lb_id]
         for i in lb_pools:
             pool_id = i['id']
             self.octavia_admin_pools_client.delete_octavia_pool(pool_id)
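Taken together, the two hunks leave the teardown path looking roughly like the sketch below. This is illustrative, not the verbatim method: the method name and signature are assumed (the hunk headers only show the enclosing FeatureManager class), and the argument elided between the two hunks is left as `...`.

    # Consolidated sketch of the resulting cleanup flow (method name and
    # signature assumed; client attributes are those shown in the diff).
    def _delete_octavia_lb_resources(self, oc_client, lb_id):
        statuses = oc_client.show_octavia_lb_status_tree(lb_id)
        statuses = statuses.get('statuses', statuses)
        lb = statuses.get('loadbalancer')
        # first pass: delete pools reachable through each listener, then
        # the listener itself, ignoring 404s for already-gone resources
        for listener in lb.get('listeners'):
            for pool in listener.get('pools'):
                self.delete_octavia_lb_pool_resources(lb_id, pool)
            test_utils.call_and_ignore_notfound_exc(
                ...,  # listener-delete call elided between the two hunks
                listener.get('id'))
        self.wait_for_octavia_loadbalancer_status(lb_id)
        # second pass: pools attached directly to the loadbalancer.
        # list_octavia_pools() returns every pool in the deployment, so
        # keep only pools whose parent loadbalancer is this lb_id.
        tbdel = self.octavia_admin_pools_client.\
            list_octavia_pools()['pools']
        lb_pools = [pool for pool in tbdel
                    if pool['loadbalancers'][0]['id'] == lb_id]
        for pool in lb_pools:
            self.octavia_admin_pools_client.delete_octavia_pool(pool['id'])

Before this change, the second pass deleted every pool returned by list_octavia_pools(), including pools belonging to other loadbalancers in the same deployment.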
@@ -17,5 +17,5 @@ def ceil(a, b):
     if b == 0:
         return 0
     div = a / b
-    mod = 0 if a % b is 0 else 1
+    mod = 0 if a % b == 0 else 1
     return div + mod
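The second hunk swaps an identity test for an equality test. `a % b is 0` only appeared to work because CPython caches small integers (roughly -5..256), so every runtime 0 happens to be the same object; that is an implementation detail, not a language guarantee, and CPython 3.8+ emits a SyntaxWarning for `is` against a literal. A minimal demonstration, independent of the commit:

    # Illustrative only; not part of the commit.
    def same_object(n):
        # build the value twice at runtime so the compiler cannot fold
        # the two expressions into one shared constant
        return (n + 1) is (n + 1)

    print(same_object(99))     # True: 100 is in CPython's small-int cache
    print(same_object(999))    # False on CPython: two distinct 1000 objects
    print((999 + 1) == 1000)   # True everywhere: == compares values

One caveat worth noting: under Python 3, `a / b` is true division, so `div + mod` yields a float; if this helper is meant to return an integer ceiling on Python 3, `a // b` (or the `-(-a // b)` idiom) would be the integer form. The hunk only addresses the comparison operator.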