Fix pool creation with single LB create call

So far, creating a populated LB (or an LB tree) with non-attached pools
left those pools in the PENDING_CREATE provisioning_status.

This patch adds the option of marking non-attached pools as ACTIVE in
the MarkLBActiveInDB task.

Story 2010426
Task 46823

Change-Id: Ie5fb25e292461baca38499091e8f9b9a53e3ddcd
This commit is contained in:
Omer 2022-11-08 17:32:17 +01:00 committed by Omer Schwartz
parent b4b6e07fe0
commit 3a157c0216
9 changed files with 63 additions and 48 deletions

View File

@ -85,12 +85,17 @@ class LoadBalancerFlows(object):
post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
lb_create_flow.add(
self.get_post_lb_amp_association_flow(
post_amp_prefix, topology, mark_active=not listeners))
self.get_post_lb_amp_association_flow(post_amp_prefix, topology))
if listeners:
lb_create_flow.add(*self._create_listeners_flow())
lb_create_flow.add(
database_tasks.MarkLBActiveInDB(
mark_subobjects=True,
requires=constants.LOADBALANCER
)
)
return lb_create_flow
def _create_single_topology(self):
@ -220,16 +225,9 @@ class LoadBalancerFlows(object):
flows.append(
self.listener_flows.get_create_all_listeners_flow()
)
flows.append(
database_tasks.MarkLBActiveInDB(
mark_subobjects=True,
requires=constants.LOADBALANCER
)
)
return flows
def get_post_lb_amp_association_flow(self, prefix, topology,
mark_active=True):
def get_post_lb_amp_association_flow(self, prefix, topology):
"""Reload the loadbalancer and create networking subflows for
created/allocated amphorae.
@ -253,10 +251,6 @@ class LoadBalancerFlows(object):
post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
requires=[constants.LOADBALANCER, constants.UPDATE_DICT]))
if mark_active:
post_create_LB_flow.add(database_tasks.MarkLBActiveInDB(
name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB,
requires=constants.LOADBALANCER))
return post_create_LB_flow
def _get_delete_listeners_flow(self, lb):

View File

@ -936,6 +936,8 @@ class MarkLBActiveInDB(BaseDatabaseTask):
loadbalancer.id)
for listener in loadbalancer.listeners:
self._mark_listener_status(listener, constants.ACTIVE)
for pool in loadbalancer.pools:
self._mark_pool_status(pool, constants.ACTIVE)
LOG.info("Mark ACTIVE in DB for load balancer id: %s",
loadbalancer.id)
@ -1010,14 +1012,20 @@ class MarkLBActiveInDB(BaseDatabaseTask):
"""
if self.mark_subobjects:
LOG.debug("Marking all listeners of loadbalancer %s ERROR",
loadbalancer.id)
LOG.debug("Marking all listeners and pools of loadbalancer %s"
" ERROR", loadbalancer.id)
for listener in loadbalancer.listeners:
try:
self._mark_listener_status(listener, constants.ERROR)
except Exception:
LOG.warning("Error updating listener %s provisioning "
"status", listener.id)
for pool in loadbalancer.pools:
try:
self._mark_pool_status(pool, constants.ERROR)
except Exception:
LOG.warning("Error updating pool %s provisioning "
"status", pool.id)
class UpdateLBServerGroupInDB(BaseDatabaseTask):

View File

@ -91,12 +91,18 @@ class LoadBalancerFlows(object):
post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
lb_create_flow.add(
self.get_post_lb_amp_association_flow(
post_amp_prefix, topology, mark_active=not listeners))
self.get_post_lb_amp_association_flow(post_amp_prefix, topology))
if listeners:
lb_create_flow.add(*self._create_listeners_flow())
lb_create_flow.add(
database_tasks.MarkLBActiveInDB(
mark_subobjects=True,
requires=constants.LOADBALANCER
)
)
if CONF.controller_worker.event_notifications:
lb_create_flow.add(
notification_tasks.SendCreateNotification(
@ -225,16 +231,9 @@ class LoadBalancerFlows(object):
flows.append(
self.listener_flows.get_create_all_listeners_flow()
)
flows.append(
database_tasks.MarkLBActiveInDB(
mark_subobjects=True,
requires=constants.LOADBALANCER
)
)
return flows
def get_post_lb_amp_association_flow(self, prefix, topology,
mark_active=True):
def get_post_lb_amp_association_flow(self, prefix, topology):
"""Reload the loadbalancer and create networking subflows for
created/allocated amphorae.
@ -257,10 +256,6 @@ class LoadBalancerFlows(object):
post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
requires=[constants.LOADBALANCER, constants.UPDATE_DICT]))
if mark_active:
post_create_LB_flow.add(database_tasks.MarkLBActiveInDB(
name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB,
requires=constants.LOADBALANCER))
return post_create_LB_flow
def _get_delete_listeners_flow(self, listeners):

View File

@ -1038,6 +1038,8 @@ class MarkLBActiveInDB(BaseDatabaseTask):
id=loadbalancer[constants.LOADBALANCER_ID])
for listener in db_lb.listeners:
self._mark_listener_status(listener, constants.ACTIVE)
for pool in db_lb.pools:
self._mark_pool_status(pool, constants.ACTIVE)
LOG.info("Mark ACTIVE in DB for load balancer id: %s",
loadbalancer[constants.LOADBALANCER_ID])
@ -1112,8 +1114,8 @@ class MarkLBActiveInDB(BaseDatabaseTask):
"""
if self.mark_subobjects:
LOG.debug("Marking all listeners of loadbalancer %s ERROR",
loadbalancer[constants.LOADBALANCER_ID])
LOG.debug("Marking all listeners and pools of loadbalancer %s"
" ERROR", loadbalancer[constants.LOADBALANCER_ID])
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
@ -1123,6 +1125,12 @@ class MarkLBActiveInDB(BaseDatabaseTask):
except Exception:
LOG.warning("Error updating listener %s provisioning "
"status", listener.id)
for pool in db_lb.pools:
try:
self._mark_pool_status(pool, constants.ERROR)
except Exception:
LOG.warning("Error updating POOL %s provisioning "
"status", pool.id)
class MarkLBActiveInDBByListener(BaseDatabaseTask):

View File

@ -160,7 +160,6 @@ class TestLoadBalancerFlows(base.TestCase):
self.assertEqual(4, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
# Test mark_active=False
amp_flow = self.LBFlow.get_post_lb_amp_association_flow(
'123', constants.TOPOLOGY_ACTIVE_STANDBY)

View File

@ -1139,14 +1139,16 @@ class TestDatabaseTasks(base.TestCase):
provisioning_status=constants.ACTIVE),
mock.call('TEST', listeners[1].id,
provisioning_status=constants.ACTIVE)])
self.assertEqual(2, repo.PoolRepository.update.call_count)
self.assertEqual(5, repo.PoolRepository.update.call_count)
repo.PoolRepository.update.assert_has_calls(
[mock.call('TEST', default_pool.id,
provisioning_status=constants.ACTIVE),
mock.call('TEST', redirect_pool.id,
provisioning_status=constants.ACTIVE),
mock.call('TEST', unused_pool.id,
provisioning_status=constants.ACTIVE)])
self.assertEqual(4, repo.MemberRepository.update.call_count)
repo.MemberRepository.update.assert_has_calls(
self.assertEqual(8, repo.MemberRepository.update.call_count)
repo.MemberRepository.update.has_calls(
[mock.call('TEST', members1[0].id,
provisioning_status=constants.ACTIVE),
mock.call('TEST', members1[1].id,
@ -1155,8 +1157,8 @@ class TestDatabaseTasks(base.TestCase):
provisioning_status=constants.ACTIVE),
mock.call('TEST', members2[1].id,
provisioning_status=constants.ACTIVE)])
self.assertEqual(1, repo.HealthMonitorRepository.update.call_count)
repo.HealthMonitorRepository.update.assert_has_calls(
self.assertEqual(2, repo.HealthMonitorRepository.update.call_count)
repo.HealthMonitorRepository.update.has_calls(
[mock.call('TEST', health_monitor.id,
provisioning_status=constants.ACTIVE)])
self.assertEqual(1, repo.L7PolicyRepository.update.call_count)
@ -1184,14 +1186,14 @@ class TestDatabaseTasks(base.TestCase):
provisioning_status=constants.ERROR),
mock.call('TEST', listeners[1].id,
provisioning_status=constants.ERROR)])
self.assertEqual(2, repo.PoolRepository.update.call_count)
repo.PoolRepository.update.assert_has_calls(
self.assertEqual(5, repo.PoolRepository.update.call_count)
repo.PoolRepository.update.has_calls(
[mock.call('TEST', default_pool.id,
provisioning_status=constants.ERROR),
mock.call('TEST', redirect_pool.id,
provisioning_status=constants.ERROR)])
self.assertEqual(4, repo.MemberRepository.update.call_count)
repo.MemberRepository.update.assert_has_calls(
self.assertEqual(8, repo.MemberRepository.update.call_count)
repo.MemberRepository.update.has_calls(
[mock.call('TEST', members1[0].id,
provisioning_status=constants.ERROR),
mock.call('TEST', members1[1].id,
@ -1200,8 +1202,8 @@ class TestDatabaseTasks(base.TestCase):
provisioning_status=constants.ERROR),
mock.call('TEST', members2[1].id,
provisioning_status=constants.ERROR)])
self.assertEqual(1, repo.HealthMonitorRepository.update.call_count)
repo.HealthMonitorRepository.update.assert_has_calls(
self.assertEqual(2, repo.HealthMonitorRepository.update.call_count)
repo.HealthMonitorRepository.update.has_calls(
[mock.call('TEST', health_monitor.id,
provisioning_status=constants.ERROR)])
self.assertEqual(1, repo.L7PolicyRepository.update.call_count)

View File

@ -208,7 +208,6 @@ class TestLoadBalancerFlows(base.TestCase):
self.assertEqual(2, len(amp_flow.requires), amp_flow.requires)
self.assertEqual(4, len(amp_flow.provides), amp_flow.provides)
# Test mark_active=False
amp_flow = self.LBFlow.get_post_lb_amp_association_flow(
'123', constants.TOPOLOGY_ACTIVE_STANDBY)

View File

@ -1315,6 +1315,8 @@ class TestDatabaseTasks(base.TestCase):
[mock.call('TEST', default_pool.id,
provisioning_status=constants.ACTIVE),
mock.call('TEST', redirect_pool.id,
provisioning_status=constants.ACTIVE),
mock.call('TEST', unused_pool.id,
provisioning_status=constants.ACTIVE)])
repo.HealthMonitorRepository.update.assert_has_calls(
[mock.call('TEST', health_monitor.id,
@ -1347,9 +1349,12 @@ class TestDatabaseTasks(base.TestCase):
[mock.call('TEST', default_pool.id,
provisioning_status=constants.ERROR),
mock.call('TEST', redirect_pool.id,
provisioning_status=constants.ERROR)])
self.assertEqual(1, repo.HealthMonitorRepository.update.call_count)
repo.HealthMonitorRepository.update.assert_has_calls(
provisioning_status=constants.ERROR),
mock.call('TEST', unused_pool.id,
provisioning_status=constants.ERROR)
])
self.assertEqual(2, repo.HealthMonitorRepository.update.call_count)
repo.HealthMonitorRepository.update.has_calls(
[mock.call('TEST', health_monitor.id,
provisioning_status=constants.ERROR)])
self.assertEqual(1, repo.L7PolicyRepository.update.call_count)

View File

@ -0,0 +1,5 @@
---
fixes:
- |
Fixed a bug where unattached pools were left in the PENDING_CREATE
provisioning_status when creating a fully-populated load balancer.