Add amphora_id to store params for failover_amphora

Several tasks in get_amphora_for_lb_failover_subflow require the
amphora_id parameter to be available in the flow store.
Without it, execution fails with the following error:

taskflow.exceptions.NotFound: Mapped argument 'amphora_id' <=
'amphora_id' was not produced by any accessible provider
(1 possible providers were scanned).

Also fix retrieval of the ID from the amphora dict in
AmphoraIndexListenersReload and add the missing retries for create_*
resources in the v2 worker.

Change-Id: I5ed6288b2776bd7f1c9b67e9cfd9a8f05b1196be
Ann Taraday 2020-10-29 18:46:34 +04:00
parent 58bf439bda
commit 1a154839c2
4 changed files with 43 additions and 10 deletions
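
For context, a minimal taskflow sketch (not Octavia code; the task class, the
store value, and the UUID below are illustrative) of the failure mode quoted
above: a task declares a required argument, and the engine raises NotFound
unless the flow store or an upstream task provides it.

from taskflow import engines, task
from taskflow.patterns import linear_flow

class FailoverAmphoraTask(task.Task):
    # 'amphora_id' must come from the flow store or another task's provides.
    def execute(self, amphora_id):
        print('failing over amphora %s' % amphora_id)

flow = linear_flow.Flow('failover').add(FailoverAmphoraTask())

# Omitting 'amphora_id' from the store reproduces:
#   taskflow.exceptions.NotFound: Mapped argument 'amphora_id' <=
#   'amphora_id' was not produced by any accessible provider ...
engines.run(flow, store={'amphora_id': 'amp-uuid'})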

@@ -434,6 +434,14 @@ class ControllerWorker(object):
flow_utils.get_update_load_balancer_flow,
store=store)
+@tenacity.retry(
+retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
+wait=tenacity.wait_incrementing(
+CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+CONF.haproxy_amphora.api_db_commit_retry_backoff,
+CONF.haproxy_amphora.api_db_commit_retry_max),
+stop=tenacity.stop_after_attempt(
+CONF.haproxy_amphora.api_db_commit_retry_attempts))
def create_member(self, member):
"""Creates a pool member.
@@ -693,6 +701,14 @@ class ControllerWorker(object):
flow_utils.get_update_pool_flow,
store=store)
+@tenacity.retry(
+retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
+wait=tenacity.wait_incrementing(
+CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+CONF.haproxy_amphora.api_db_commit_retry_backoff,
+CONF.haproxy_amphora.api_db_commit_retry_max),
+stop=tenacity.stop_after_attempt(
+CONF.haproxy_amphora.api_db_commit_retry_attempts))
def create_l7policy(self, l7policy):
"""Creates an L7 Policy.
@@ -759,6 +775,14 @@ class ControllerWorker(object):
flow_utils.get_update_l7policy_flow,
store=store)
+@tenacity.retry(
+retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
+wait=tenacity.wait_incrementing(
+CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+CONF.haproxy_amphora.api_db_commit_retry_backoff,
+CONF.haproxy_amphora.api_db_commit_retry_max),
+stop=tenacity.stop_after_attempt(
+CONF.haproxy_amphora.api_db_commit_retry_attempts))
def create_l7rule(self, l7rule):
"""Creates an L7 Rule.
@@ -920,7 +944,8 @@ class ControllerWorker(object):
constants.LOADBALANCER: provider_lb_dict,
constants.SERVER_GROUP_ID: server_group_id,
constants.LOADBALANCER_ID: lb_id,
-constants.VIP: vip_dict}
+constants.VIP: vip_dict,
+constants.AMPHORA_ID: amphora_id}
self.run_flow(
flow_utils.get_failover_amphora_flow,

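The retry decorators added above reuse the pattern already present elsewhere
in the v2 worker. As a standalone sketch of the same tenacity configuration
(the numeric values are illustrative stand-ins for the CONF.haproxy_amphora
settings, and NoResultFound here is a local stand-in for the SQLAlchemy
exception):

import tenacity

class NoResultFound(Exception):
    """Stand-in for sqlalchemy.orm.exc.NoResultFound."""

@tenacity.retry(
    retry=tenacity.retry_if_exception_type(NoResultFound),
    wait=tenacity.wait_incrementing(start=1, increment=1, max=5),
    stop=tenacity.stop_after_attempt(15))
def create_member(member):
    # Retried while the object created by the API is not yet visible in
    # the database and the lookup raises NoResultFound.
    ...
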
@@ -191,7 +191,7 @@ class AmphoraIndexListenersReload(BaseAmphoraTask):
try:
self.amphora_driver.reload(db_lb, db_amp, timeout_dict)
except Exception as e:
-amphora_id = amphorae[amphora_index].id
+amphora_id = amphorae[amphora_index][constants.ID]
LOG.warning('Failed to reload listeners on amphora %s. '
'Skipping this amphora as it is failing to '
'reload due to: %s', amphora_id, str(e))

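The one-line change above reflects that the amphorae entries in this v2 task
are dicts rather than database objects, so attribute access fails. A small
illustration (the dict content is made up; constants.ID maps to the 'id' key):

amphora = {'id': 'amp-uuid'}   # v2 tasks receive dict payloads, not ORM objects

# amphora.id  -> AttributeError: 'dict' object has no attribute 'id'
amphora_id = amphora['id']     # equivalent of amphorae[amphora_index][constants.ID]
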
@@ -221,7 +221,8 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock,
self.timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
-_session_mock, amphora_mock.id, status=constants.ERROR)
+_session_mock, amphora_mock[constants.ID],
+status=constants.ERROR)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_error')

@@ -1270,7 +1270,8 @@ class TestControllerWorker(base.TestCase):
constants.LOADBALANCER: mock_provider_lb.to_dict(),
constants.LOADBALANCER_ID: LB_ID,
constants.SERVER_GROUP_ID: None,
-constants.VIP: mock_lb.vip.to_dict()}
+constants.VIP: mock_lb.vip.to_dict(),
+constants.AMPHORA_ID: AMP_ID}
cw = controller_worker.ControllerWorker()
cw.services_controller.reset_mock()
@@ -1325,7 +1326,8 @@ class TestControllerWorker(base.TestCase):
constants.LOADBALANCER: mock_provider_lb.to_dict(),
constants.LOADBALANCER_ID: LB_ID,
constants.SERVER_GROUP_ID: None,
-constants.VIP: mock_lb.vip.to_dict()}
+constants.VIP: mock_lb.vip.to_dict(),
+constants.AMPHORA_ID: AMP_ID}
cw = controller_worker.ControllerWorker()
cw.services_controller.reset_mock()
@@ -1380,7 +1382,8 @@ class TestControllerWorker(base.TestCase):
constants.LOADBALANCER: mock_provider_lb.to_dict(),
constants.LOADBALANCER_ID: LB_ID,
constants.SERVER_GROUP_ID: SERVER_GROUP_ID,
-constants.VIP: mock_lb.vip.to_dict()}
+constants.VIP: mock_lb.vip.to_dict(),
+constants.AMPHORA_ID: AMP_ID}
cw = controller_worker.ControllerWorker()
cw.services_controller.reset_mock()
@@ -1432,7 +1435,8 @@ class TestControllerWorker(base.TestCase):
constants.LOADBALANCER: mock_provider_lb.to_dict(),
constants.LOADBALANCER_ID: LB_ID,
constants.SERVER_GROUP_ID: SERVER_GROUP_ID,
-constants.VIP: mock_lb.vip.to_dict()}
+constants.VIP: mock_lb.vip.to_dict(),
+constants.AMPHORA_ID: AMP_ID}
cw = controller_worker.ControllerWorker()
cw.services_controller.reset_mock()
@@ -1488,7 +1492,8 @@ class TestControllerWorker(base.TestCase):
constants.LOADBALANCER: mock_provider_lb.to_dict(),
constants.LOADBALANCER_ID: LB_ID,
constants.SERVER_GROUP_ID: None,
-constants.VIP: mock_lb.vip.to_dict()}
+constants.VIP: mock_lb.vip.to_dict(),
+constants.AMPHORA_ID: AMP_ID}
mock_get_flavor_meta.return_value = {'taste': 'spicy'}
cw = controller_worker.ControllerWorker()
@@ -1544,7 +1549,8 @@ class TestControllerWorker(base.TestCase):
constants.LOADBALANCER: mock_provider_lb.to_dict(),
constants.LOADBALANCER_ID: LB_ID,
constants.SERVER_GROUP_ID: None,
-constants.VIP: mock_lb.vip.to_dict()}
+constants.VIP: mock_lb.vip.to_dict(),
+constants.AMPHORA_ID: AMP_ID}
mock_get_az_meta.return_value = {'planet': 'jupiter'}
cw = controller_worker.ControllerWorker()
@@ -1635,7 +1641,8 @@ class TestControllerWorker(base.TestCase):
constants.LOADBALANCER: None,
constants.LOADBALANCER_ID: None,
constants.SERVER_GROUP_ID: None,
-constants.VIP: {}}
+constants.VIP: {},
+constants.AMPHORA_ID: AMP_ID}
cw = controller_worker.ControllerWorker()
cw.services_controller.reset_mock()