Fix LB member status update in error conditions
When adding a node to the LB pool or removing a node from the LB pool fails, we should set the node's status to WARNING. This hints to users that the node may not be functioning as expected. This patch also fixes the logic for updating the action status when checking the LB policy. Change-Id: I0857c35782b42b80e8f0ef8759fb7d144a7813a8
This commit is contained in:
parent
d759b6feb5
commit
efaed3d0c0
3
TODO.rst
3
TODO.rst
|
@ -41,9 +41,8 @@ ENGINE
|
|||
|
||||
POLICY
|
||||
------
|
||||
|
||||
- Support CLUSTER_RECOVER action in LB policy
|
||||
- Mark node ERROR/WARNING if node cannot be added to the LB pool associated
|
||||
with the cluster
|
||||
|
||||
MIDDLE PRIORITY
|
||||
===============
|
||||
|
|
|
@ -466,6 +466,7 @@ class LoadBalancingPolicy(base.Policy):
|
|||
pool_id = policy_data['pool']
|
||||
|
||||
# Remove nodes that will be deleted from lb pool
|
||||
failed_nodes = []
|
||||
for node_id in candidates:
|
||||
node = nm.Node.load(action.context, node_id=node_id)
|
||||
member_id = node.data.get('lb_member', None)
|
||||
|
@ -476,10 +477,18 @@ class LoadBalancingPolicy(base.Policy):
|
|||
|
||||
res = lb_driver.member_remove(lb_id, pool_id, member_id)
|
||||
if res is not True:
|
||||
action.data['status'] = base.CHECK_ERROR
|
||||
action.data['reason'] = _('Failed in removing deleted '
|
||||
'node(s) from lb pool.')
|
||||
return
|
||||
failed_nodes.append(node.id)
|
||||
node.status = consts.NS_WARNING
|
||||
node.status_reason = _('Failed in removing node from lb pool.')
|
||||
|
||||
node.data.pop('lb_member', None)
|
||||
node.store(action.context)
|
||||
|
||||
if failed_nodes:
|
||||
error = _('Failed in removing deleted node(s) from lb pool: %s'
|
||||
) % failed_nodes
|
||||
action.data['status'] = base.CHECK_ERROR
|
||||
action.data['reason'] = error
|
||||
|
||||
return
|
||||
|
||||
|
@ -516,6 +525,7 @@ class LoadBalancingPolicy(base.Policy):
|
|||
subnet = self.pool_spec.get(self.POOL_SUBNET)
|
||||
|
||||
# Add new nodes to lb pool
|
||||
failed_nodes = []
|
||||
for node_id in nodes_added:
|
||||
node = nm.Node.load(action.context, node_id=node_id)
|
||||
member_id = node.data.get('lb_member', None)
|
||||
|
@ -527,12 +537,17 @@ class LoadBalancingPolicy(base.Policy):
|
|||
member_id = lb_driver.member_add(node, lb_id, pool_id, port,
|
||||
subnet)
|
||||
if member_id is None:
|
||||
action.data['status'] = base.CHECK_ERROR
|
||||
action.data['reason'] = _('Failed in adding new node(s) '
|
||||
'into lb pool.')
|
||||
return
|
||||
failed_nodes.append(node.id)
|
||||
node.status = consts.NS_WARNING
|
||||
node.status_reason = _('Failed in adding node into lb pool.')
|
||||
else:
|
||||
node.data.update({'lb_member': member_id})
|
||||
|
||||
node.data.update({'lb_member': member_id})
|
||||
node.store(action.context)
|
||||
|
||||
if failed_nodes:
|
||||
error = _('Failed in adding nodes into lb pool: %s') % failed_nodes
|
||||
action.data['status'] = base.CHECK_ERROR
|
||||
action.data['reason'] = error
|
||||
|
||||
return
|
||||
|
|
|
@ -644,7 +644,7 @@ class TestLoadBalancingPolicyOperations(base.SenlinTestCase):
|
|||
def test_post_op_add_nodes_failed(self, m_cluster_get, m_node_load,
|
||||
m_extract, m_load):
|
||||
cluster_id = 'CLUSTER_ID'
|
||||
node1 = mock.Mock(data={})
|
||||
node1 = mock.Mock(id='NODE_ID', data={})
|
||||
action = mock.Mock(data={'creation': {'nodes': ['NODE1_ID']}},
|
||||
context='action_context',
|
||||
action=consts.CLUSTER_RESIZE)
|
||||
|
@ -663,10 +663,15 @@ class TestLoadBalancingPolicyOperations(base.SenlinTestCase):
|
|||
|
||||
self.assertIsNone(res)
|
||||
self.assertEqual(policy_base.CHECK_ERROR, action.data['status'])
|
||||
self.assertEqual('Failed in adding new node(s) into lb pool.',
|
||||
action.data['reason'])
|
||||
self.assertEqual("Failed in adding nodes into lb pool: "
|
||||
"['NODE_ID']", action.data['reason'])
|
||||
self.lb_driver.member_add.assert_called_once_with(
|
||||
node1, 'LB_ID', 'POOL_ID', 80, 'test-subnet')
|
||||
self.assertEqual(consts.NS_WARNING, node1.status)
|
||||
self.assertEqual('Failed in adding node into lb pool.',
|
||||
node1.status_reason)
|
||||
self.assertEqual({}, node1.data)
|
||||
node1.store.assert_called_once_with(action.context)
|
||||
|
||||
@mock.patch.object(node_mod.Node, 'load')
|
||||
@mock.patch.object(co.Cluster, 'get')
|
||||
|
@ -759,8 +764,7 @@ class TestLoadBalancingPolicyOperations(base.SenlinTestCase):
|
|||
def test_pre_op_del_nodes_failed(self, m_cluster_get, m_node_load,
|
||||
m_extract, m_load):
|
||||
cluster_id = 'CLUSTER_ID'
|
||||
node1 = mock.Mock()
|
||||
node1.data = {'lb_member': 'MEMBER1_ID'}
|
||||
node1 = mock.Mock(id='NODE_ID', data={'lb_member': 'MEMBER1_ID'})
|
||||
action = mock.Mock(
|
||||
action=consts.CLUSTER_RESIZE,
|
||||
context='action_context',
|
||||
|
@ -780,7 +784,12 @@ class TestLoadBalancingPolicyOperations(base.SenlinTestCase):
|
|||
|
||||
self.assertIsNone(res)
|
||||
self.assertEqual(policy_base.CHECK_ERROR, action.data['status'])
|
||||
self.assertEqual('Failed in removing deleted node(s) from lb pool.',
|
||||
action.data['reason'])
|
||||
self.assertEqual("Failed in removing deleted node(s) from lb pool: "
|
||||
"['NODE_ID']", action.data['reason'])
|
||||
self.lb_driver.member_remove.assert_called_once_with(
|
||||
'LB_ID', 'POOL_ID', 'MEMBER1_ID')
|
||||
self.assertEqual(consts.NS_WARNING, node1.status)
|
||||
self.assertEqual('Failed in removing node from lb pool.',
|
||||
node1.status_reason)
|
||||
self.assertEqual({}, node1.data)
|
||||
node1.store.assert_called_once_with(action.context)
|
||||
|
|
Loading…
Reference in New Issue