Store randomly assigned VIP address in cluster data
A user is allowed to define an lb policy without specifying a VIP address. In that case, a random VIP address will be allocated when the lb policy is attached, and this address should be exposed to the end user through the cluster data. This patch addresses that issue. Closes-Bug: #1558930 Change-Id: I9579dfa79563e9ac3868abf552687a2cdb5827b9
This commit is contained in:
parent
dc7b4ec0cc
commit
62ab1d31d3
|
@ -108,10 +108,12 @@ class LoadBalancerDriver(base.DriverBase):
|
|||
LOG.exception(msg)
|
||||
return False, msg
|
||||
result['loadbalancer'] = lb.id
|
||||
result['vip_address'] = lb.vip_address
|
||||
|
||||
res = self._wait_for_lb_ready(lb.id)
|
||||
if res is False:
|
||||
msg = _LE('Failed in creating load balancer (%s).') % lb.id
|
||||
del result['vip_address']
|
||||
_cleanup(msg, **result)
|
||||
return False, msg
|
||||
|
||||
|
@ -131,6 +133,7 @@ class LoadBalancerDriver(base.DriverBase):
|
|||
res = self._wait_for_lb_ready(lb.id)
|
||||
if res is False:
|
||||
msg = _LE('Failed in creating listener (%s).') % listener.id
|
||||
del result['vip_address']
|
||||
_cleanup(msg, **result)
|
||||
return res, msg
|
||||
|
||||
|
@ -148,6 +151,7 @@ class LoadBalancerDriver(base.DriverBase):
|
|||
res = self._wait_for_lb_ready(lb.id)
|
||||
if res is False:
|
||||
msg = _LE('Failed in creating pool (%s).') % pool.id
|
||||
del result['vip_address']
|
||||
_cleanup(msg, **result)
|
||||
return res, msg
|
||||
|
||||
|
@ -173,6 +177,7 @@ class LoadBalancerDriver(base.DriverBase):
|
|||
if res is False:
|
||||
msg = _LE('Failed in creating health monitor (%s).'
|
||||
) % health_monitor.id
|
||||
del result['vip_address']
|
||||
_cleanup(msg, **result)
|
||||
return res, msg
|
||||
|
||||
|
|
|
@ -740,6 +740,11 @@ class ClusterAction(base.Action):
|
|||
res, reason = self.cluster.attach_policy(self.context, policy_id,
|
||||
inputs)
|
||||
result = self.RES_OK if res else self.RES_ERROR
|
||||
|
||||
# Store cluster since its data could have been updated
|
||||
if result == self.RES_OK:
|
||||
self.cluster.store(self.context)
|
||||
|
||||
return result, reason
|
||||
|
||||
def do_detach_policy(self):
|
||||
|
@ -753,6 +758,11 @@ class ClusterAction(base.Action):
|
|||
|
||||
res, reason = self.cluster.detach_policy(self.context, policy_id)
|
||||
result = self.RES_OK if res else self.RES_ERROR
|
||||
|
||||
# Store cluster since its data could have been updated
|
||||
if result == self.RES_OK:
|
||||
self.cluster.store(self.context)
|
||||
|
||||
return result, reason
|
||||
|
||||
def do_update_policy(self):
|
||||
|
|
|
@ -310,6 +310,10 @@ class LoadBalancingPolicy(base.Policy):
|
|||
node.data.update({'lb_member': member_id})
|
||||
node.store(oslo_context.get_current())
|
||||
|
||||
cluster_data_lb = cluster.data.get('loadbalancers', {})
|
||||
cluster_data_lb[self.id] = {'vip_address': data.pop('vip_address')}
|
||||
cluster.data['loadbalancers'] = cluster_data_lb
|
||||
|
||||
policy_data = self._build_policy_data(data)
|
||||
|
||||
return True, policy_data
|
||||
|
@ -345,6 +349,14 @@ class LoadBalancingPolicy(base.Policy):
|
|||
node.data.pop('lb_member')
|
||||
node.store(oslo_context.get_current())
|
||||
|
||||
lb_data = cluster.data.get('loadbalancers', {})
|
||||
if lb_data and isinstance(lb_data, dict):
|
||||
lb_data.pop(self.id, None)
|
||||
if lb_data:
|
||||
cluster.data['loadbalancers'] = lb_data
|
||||
else:
|
||||
cluster.data.pop('loadbalancers')
|
||||
|
||||
return True, reason
|
||||
|
||||
def _get_delete_candidates(self, cluster_id, action):
|
||||
|
|
|
@ -17,8 +17,10 @@ class LoadBalancerDriver(base.DriverBase):
|
|||
def __init__(self, params):
|
||||
self.lb_result = {
|
||||
"loadbalancer": "a36c20d0-18e9-42ce-88fd-82a35977ee8c",
|
||||
"vip_address": "192.168.1.100",
|
||||
"listener": "35cb8516-1173-4035-8dae-0dae3453f37f",
|
||||
"pool": "4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5"
|
||||
"pool": "4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5",
|
||||
"healthmonitor": "0a9ac99d-0a09-4b18-8499-a0796850279a"
|
||||
}
|
||||
|
||||
self.member_id = "9a7aff27-fd41-4ec1-ba4c-3eb92c629313"
|
||||
|
|
|
@ -121,6 +121,7 @@ class TestNeutronLBaaSDriver(base.SenlinTestCase):
|
|||
pool_obj = mock.Mock()
|
||||
hm_obj = mock.Mock()
|
||||
lb_obj.id = 'LB_ID'
|
||||
lb_obj.vip_address = '192.168.1.100'
|
||||
listener_obj.id = 'LISTENER_ID'
|
||||
pool_obj.id = 'POOL_ID'
|
||||
subnet_obj = mock.Mock()
|
||||
|
@ -143,6 +144,7 @@ class TestNeutronLBaaSDriver(base.SenlinTestCase):
|
|||
self.nc.loadbalancer_create.assert_called_once_with(
|
||||
'SUBNET_ID', self.vip['address'], self.vip['admin_state_up'])
|
||||
self.assertEqual('LB_ID', res['loadbalancer'])
|
||||
self.assertEqual('192.168.1.100', res['vip_address'])
|
||||
self.nc.listener_create.assert_called_once_with(
|
||||
'LB_ID', self.vip['protocol'], self.vip['protocol_port'],
|
||||
self.vip['connection_limit'], self.vip['admin_state_up'])
|
||||
|
@ -249,6 +251,7 @@ class TestNeutronLBaaSDriver(base.SenlinTestCase):
|
|||
listener_obj = mock.Mock()
|
||||
pool_obj = mock.Mock()
|
||||
lb_obj.id = 'LB_ID'
|
||||
lb_obj.vip_address = '192.169.1.100'
|
||||
listener_obj.id = 'LISTENER_ID'
|
||||
pool_obj.id = 'POOL_ID'
|
||||
subnet_obj = mock.Mock()
|
||||
|
|
|
@ -2139,6 +2139,7 @@ class ClusterActionTest(base.SenlinTestCase):
|
|||
self.assertEqual('OK', res_msg)
|
||||
cluster.attach_policy.assert_called_once_with(
|
||||
action.context, 'FAKE_POLICY', {'FOO': 'BAR'})
|
||||
cluster.store.assert_called_once_with(action.context)
|
||||
|
||||
def test_do_attach_policy_missing_policy(self, mock_load):
|
||||
cluster = mock.Mock()
|
||||
|
@ -2169,6 +2170,7 @@ class ClusterActionTest(base.SenlinTestCase):
|
|||
self.assertEqual('Success', res_msg)
|
||||
cluster.detach_policy.assert_called_once_with(action.context,
|
||||
'FAKE_POLICY')
|
||||
cluster.store.assert_called_once_with(action.context)
|
||||
|
||||
def test_do_detach_policy_missing_policy(self, mock_load):
|
||||
cluster = mock.Mock()
|
||||
|
|
|
@ -136,6 +136,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
|
|||
@mock.patch.object(lb_policy.LoadBalancingPolicy, '_build_conn_params')
|
||||
def test_attach_succeeded(self, m_conn, m_attach, m_load, m_build):
|
||||
cluster = mock.Mock()
|
||||
cluster.data = {}
|
||||
cluster.id = 'CLUSTER_ID'
|
||||
node1 = mock.Mock()
|
||||
node2 = mock.Mock()
|
||||
|
@ -144,10 +145,12 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
|
|||
m_build.return_value = 'policy_data'
|
||||
data = {
|
||||
'loadbalancer': 'LB_ID',
|
||||
'vip_address': '192.168.1.100',
|
||||
'pool': 'POOL_ID'
|
||||
}
|
||||
|
||||
policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec)
|
||||
policy.id = 'FAKE_ID'
|
||||
|
||||
self.lb_driver.lb_create.return_value = (True, data)
|
||||
self.lb_driver.member_add.side_effect = ['MEMBER1_ID', 'MEMBER2_ID']
|
||||
|
@ -167,6 +170,10 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
|
|||
node2.data.update.assert_called_once_with({'lb_member': 'MEMBER2_ID'})
|
||||
node1.store.assert_called_once_with(mock.ANY)
|
||||
node2.store.assert_called_once_with(mock.ANY)
|
||||
expected = {
|
||||
policy.id: {'vip_address': '192.168.1.100'}
|
||||
}
|
||||
self.assertEqual(expected, cluster.data['loadbalancers'])
|
||||
|
||||
@mock.patch.object(policy_base.Policy, 'attach')
|
||||
def test_attach_failed_base_return_false(self, mock_attach):
|
||||
|
@ -203,6 +210,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
|
|||
mock_load.return_value = ['node1', 'node2']
|
||||
lb_data = {
|
||||
'loadbalancer': 'LB_ID',
|
||||
'vip_address': '192.168.1.100',
|
||||
'pool': 'POOL_ID'
|
||||
}
|
||||
|
||||
|
@ -416,6 +424,11 @@ class TestLoadBalancingPolicyOperations(base.SenlinTestCase):
|
|||
m_extract.return_value = policy_data
|
||||
self.lb_driver.lb_delete.return_value = (True, 'lb_delete succeeded.')
|
||||
policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec)
|
||||
cluster.data = {
|
||||
'loadbalancers': {
|
||||
policy.id: {'vip_address': '192.168.1.100'}
|
||||
}
|
||||
}
|
||||
|
||||
res, data = policy.detach(cluster)
|
||||
self.assertTrue(res)
|
||||
|
@ -423,6 +436,7 @@ class TestLoadBalancingPolicyOperations(base.SenlinTestCase):
|
|||
m_load.assert_called_once_with(mock.ANY, cluster.id, policy.id)
|
||||
m_extract.assert_called_once_with(cp_data)
|
||||
self.lb_driver.lb_delete.assert_called_once_with(**policy_data)
|
||||
self.assertEqual({}, cluster.data)
|
||||
|
||||
def test_detach_failed_lb_delete(self, m_extract, m_load, m_conn):
|
||||
cluster = mock.Mock()
|
||||
|
|
Loading…
Reference in New Issue