Add loadbalancer params to lb_policy

I need to attach a load balancing policy to my cluster using an existing
loadbalancer, but senlin does not support this and reports the error
"Failed in creating loadbalancer: Conflict (HTTP 409)".

To fix this issue, add three parameters to LoadBalancingPolicy:
loadbalancer, pool id and health monitor id. When they are set, members
are added to the existing loadbalancer directly instead of always
creating a new one.
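
With these changes a policy spec can point at pre-created LBaaS resources
instead of having senlin create them. A minimal sketch, mirroring the new
unit test (LB_ID, POOL_ID and HM_ID stand in for the IDs of the existing
loadbalancer, pool and health monitor):

    spec = {
        'type': 'senlin.policy.loadbalance',
        'version': '1.0',
        'properties': {
            'loadbalancer': 'LB_ID',
            'pool': {
                'id': 'POOL_ID',
                'subnet': 'internal-subnet'
            },
            'vip': {
                'address': '192.168.1.100',
                'subnet': 'external-subnet'
            },
            'health_monitor': {
                'id': 'HM_ID'
            }
        }
    }

When 'loadbalancer' is set, attach() records the given resources in the
policy data (with 'preexisting': True) instead of calling lb_create(), and
detach() skips lb_delete() for such a loadbalancer.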

Change-Id: I0e5a98c5c6dcd0b4e884d2a4d4dc0a5a8e92ef19
Closes-Bug: 1715070
TingtingYu 2017-09-05 14:44:08 +08:00
parent 52811c7e9e
commit 921fde986a
2 changed files with 116 additions and 14 deletions


@@ -68,17 +68,17 @@ class LoadBalancingPolicy(base.Policy):
     ]
 
     KEYS = (
-        POOL, VIP, HEALTH_MONITOR, LB_STATUS_TIMEOUT
+        POOL, VIP, HEALTH_MONITOR, LB_STATUS_TIMEOUT, LOADBALANCER
     ) = (
-        'pool', 'vip', 'health_monitor', 'lb_status_timeout'
+        'pool', 'vip', 'health_monitor', 'lb_status_timeout', 'loadbalancer'
     )
 
     _POOL_KEYS = (
         POOL_PROTOCOL, POOL_PROTOCOL_PORT, POOL_SUBNET,
-        POOL_LB_METHOD, POOL_ADMIN_STATE_UP, POOL_SESSION_PERSISTENCE,
+        POOL_LB_METHOD, POOL_ADMIN_STATE_UP, POOL_SESSION_PERSISTENCE, POOL_ID,
     ) = (
         'protocol', 'protocol_port', 'subnet',
-        'lb_method', 'admin_state_up', 'session_persistence',
+        'lb_method', 'admin_state_up', 'session_persistence', 'id',
     )
 
     PROTOCOLS = (
@@ -115,10 +115,10 @@ class LoadBalancingPolicy(base.Policy):
     HEALTH_MONITOR_KEYS = (
         HM_TYPE, HM_DELAY, HM_TIMEOUT, HM_MAX_RETRIES, HM_ADMIN_STATE_UP,
-        HM_HTTP_METHOD, HM_URL_PATH, HM_EXPECTED_CODES,
+        HM_HTTP_METHOD, HM_URL_PATH, HM_EXPECTED_CODES, HM_ID,
     ) = (
         'type', 'delay', 'timeout', 'max_retries', 'admin_state_up',
-        'http_method', 'url_path', 'expected_codes',
+        'http_method', 'url_path', 'expected_codes', 'id',
     )
 
     _SESSION_PERSISTENCE_KEYS = (
@@ -179,6 +179,11 @@ class LoadBalancingPolicy(base.Policy):
                     },
                     default={},
                 ),
+                POOL_ID: schema.String(
+                    _('ID of pool for the cluster on which nodes can '
+                      'be connected.'),
+                    default=None,
+                ),
             },
         ),
         VIP: schema.Map(
@@ -258,12 +263,21 @@ class LoadBalancingPolicy(base.Policy):
                 HM_EXPECTED_CODES: schema.String(
                     _('Expected HTTP codes for a passing HTTP(S) monitor.'),
                 ),
+                HM_ID: schema.String(
+                    _('ID of the health manager for the loadbalancer.'),
+                    default=None,
+                ),
             },
         ),
         LB_STATUS_TIMEOUT: schema.Integer(
             _('Time in second to wait for loadbalancer to become ready '
               'after senlin requests LBaaS V2 service for operations.'),
             default=300,
         ),
+        LOADBALANCER: schema.String(
+            _('Name or ID of loadbalancer for the cluster on which nodes can '
+              'be connected.'),
+            default=None,
+        )
     }
@@ -274,7 +288,7 @@ class LoadBalancingPolicy(base.Policy):
         self.vip_spec = self.properties.get(self.VIP, {})
         self.hm_spec = self.properties.get(self.HEALTH_MONITOR, None)
         self.lb_status_timeout = self.properties.get(self.LB_STATUS_TIMEOUT)
-        self.lb = None
+        self.lb = self.properties.get(self.LOADBALANCER, None)
 
     def validate(self, context, validate_props=False):
         super(LoadBalancingPolicy, self).validate(context, validate_props)
@@ -320,10 +334,19 @@ class LoadBalancingPolicy(base.Policy):
         # TODO(Anyone): Check if existing nodes has conflicts regarding the
         # subnets. Each VM addresses detail has a key named to the network
         # which can be used for validation.
-        res, data = lb_driver.lb_create(self.vip_spec, self.pool_spec,
-                                        self.hm_spec)
-        if res is False:
-            return False, data
+        if self.lb:
+            data = {}
+            data['preexisting'] = True
+            data['loadbalancer'] = self.lb
+            data['pool'] = self.pool_spec.get(self.POOL_ID, None)
+            data['vip_address'] = self.vip_spec.get(self.VIP_ADDRESS, None)
+            if self.hm_spec.get(self.HM_ID, None):
+                data['healthmonitor'] = self.hm_spec.get(self.HM_ID, None)
+        else:
+            res, data = lb_driver.lb_create(self.vip_spec, self.pool_spec,
+                                            self.hm_spec)
+            if res is False:
+                return False, data
 
         port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
         subnet = self.pool_spec.get(self.POOL_SUBNET)
@@ -371,9 +394,11 @@ class LoadBalancingPolicy(base.Policy):
         if policy_data is None:
             return True, reason
 
-        res, reason = lb_driver.lb_delete(**policy_data)
-        if res is False:
-            return False, reason
+        is_existed = policy_data.get('preexisting', False)
+        if not is_existed:
+            res, reason = lb_driver.lb_delete(**policy_data)
+            if res is False:
+                return False, reason
 
         for node in cluster.nodes:
             if 'lb_member' in node.data:


@@ -36,6 +36,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
             'version': '1.0',
             'properties': {
                 'pool': {
+                    'id': '',
                     'protocol': 'HTTP',
                     'protocol_port': 80,
                     'subnet': 'internal-subnet',
@@ -97,6 +98,7 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
             'version': '1.0',
             'properties': {
                 'pool': {
+                    'id': None,
                     'protocol': 'HTTP',
                     'protocol_port': 80,
                     'subnet': 'internal-subnet',
@@ -127,6 +129,38 @@ class TestLoadBalancingPolicy(base.SenlinTestCase):
                          policy.lb_status_timeout)
         self.assertIsNone(policy.lb)
 
+    def test_loadbalancer_value(self):
+        spec = {
+            'type': 'senlin.policy.loadbalance',
+            'version': '1.0',
+            'properties': {
+                'loadbalancer': 'LB_ID',
+                'pool': {
+                    'id': 'POOL_ID',
+                    'subnet': 'internal-subnet'
+                },
+                'vip': {
+                    'address': '192.168.1.100',
+                    'subnet': 'external-subnet'
+                },
+                'health_monitor': {
+                    'id': 'HM_ID'
+                }
+            }
+        }
+        self.spec['properties']['pool']['id'] = 'POOL_ID'
+        self.spec['properties']['health_monitor']['id'] = 'HM_ID'
+        self.spec['properties']['loadbalancer'] = 'LB_ID'
+        self.spec['properties']['pool']['session_persistence'] = {}
+        self.spec['properties']['vip']['connection_limit'] = -1
+        policy = lb_policy.LoadBalancingPolicy('test-policy', spec)
+
+        self.assertIsNone(policy.id)
+        self.assertEqual('test-policy', policy.name)
+        self.assertEqual('senlin.policy.loadbalance-1.0', policy.type)
+        self.assertEqual(self.spec['properties']['pool'], policy.pool_spec)
+        self.assertEqual(self.spec['properties']['vip'], policy.vip_spec)
+        self.assertEqual(self.spec['properties']['loadbalancer'], policy.lb)
+
     @mock.patch.object(policy_base.Policy, 'validate')
     def test_validate_shallow(self, mock_validate):
         policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec)
@@ -523,8 +557,51 @@ class TestLoadBalancingPolicyOperations(base.SenlinTestCase):
         self.lb_driver.lb_delete.assert_called_once_with(**policy_data)
         self.assertEqual({}, cluster.data)
 
+    def test_detach_existed_lbass_succeeded(self, m_extract, m_load):
+        cp = mock.Mock()
+        policy_data = {
+            'loadbalancer': 'LB_ID',
+            'listener': 'LISTENER_ID',
+            'pool': 'POOL_ID',
+            'healthmonitor': 'HM_ID',
+            'preexisting': True,
+        }
+        cp_data = {
+            'LoadBalancingPolicy': {
+                'version': '1.0',
+                'data': policy_data
+            }
+        }
+        cp.data = cp_data
+        m_load.return_value = cp
+        m_extract.return_value = policy_data
+        policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec)
+        policy._lbaasclient = self.lb_driver
+        cluster = mock.Mock(
+            id='CLUSTER_ID',
+            data={
+                'loadbalancers': {
+                    policy.id: {'vip_address': '192.168.1.100'}
+                }
+            })
+        node = mock.Mock(id='fake', data={})
+        cluster.nodes = [node]
+
+        res, data = policy.detach(cluster)
+
+        self.assertTrue(res)
+        self.assertEqual('LB resources deletion succeeded.', data)
+        m_load.assert_called_once_with(mock.ANY, cluster.id, policy.id)
+        m_extract.assert_called_once_with(cp_data)
+        self.assertEqual({}, cluster.data)
+
     def test_detach_failed_lb_delete(self, m_extract, m_load):
         cluster = mock.Mock()
+        policy_data = {
+            'preexisting': False,
+        }
+        m_extract.return_value = policy_data
         self.lb_driver.lb_delete.return_value = (False, 'lb_delete failed.')
         policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec)