Create disabled members in haproxy
Members that were disabled (admin_state_up=False) were simply excluded from
the haproxy configuration we pass to the amps. Instead, we should create them
in a disabled state, so that they are reported in health messages with status
"maint" and can be marked OFFLINE via the standard health mechanism, rather
than only via override hacks.

This also resolves a bug introduced in an earlier change
(https://review.openstack.org/#/c/567322/) that caused admin-downed members
to remain in NO_MONITOR indefinitely.

Change-Id: I6615b3ff89d7cef2af52d474aab3a03d947f98be
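For reference, haproxy keeps a server configured with the `disabled` keyword
in maintenance mode, which is what it then reports in health/stats messages.
The new unit test below expects the rendered backend to contain both an
active and a disabled server line:

    server sample_member_id_1 10.0.0.99:82 weight 13 cookie sample_member_id_1
    server sample_member_id_2 10.0.0.98:82 weight 13 cookie sample_member_id_2 disabled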
parent db69c051ac
commit c6ff8596db
@@ -184,9 +184,15 @@ frontend {{ listener.id }}
     {% else %}
         {% set member_backup_opt = "" %}
     {% endif %}
-    {{ "server %s %s:%d weight %s%s%s%s%s"|e|format(
+    {% if member.enabled %}
+        {% set member_enabled_opt = "" %}
+    {% else %}
+        {% set member_enabled_opt = " disabled" %}
+    {% endif %}
+    {{ "server %s %s:%d weight %s%s%s%s%s%s"|e|format(
         member.id, member.address, member.protocol_port, member.weight,
-        hm_opt, persistence_opt, proxy_protocol_opt, member_backup_opt)|trim() }}
+        hm_opt, persistence_opt, proxy_protocol_opt, member_backup_opt,
+        member_enabled_opt)|trim() }}
 {% endmacro %}
 
 
@@ -257,7 +263,7 @@ backend {{ pool.id }}
     option allbackups
     timeout connect {{ listener.timeout_member_connect }}
     timeout server {{ listener.timeout_member_data }}
-    {% for member in pool.members if member.enabled %}
+    {% for member in pool.members %}
         {{- member_macro(constants, pool, member) -}}
     {% endfor %}
 {% endmacro %}
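A note on the template change above: `member_enabled_opt` is set inside an
`{% if %}` block and consumed afterwards, which works because Jinja2 `if`
blocks do not introduce a new scope. A minimal standalone sketch of the same
pattern (illustrative only, not Octavia code; requires the Jinja2 package):

    import collections

    import jinja2

    # Condensed member_macro logic; the real macro also appends the
    # health-monitor, persistence, proxy-protocol and backup options.
    TEMPLATE = jinja2.Template(
        "{% if member.enabled %}{% set member_enabled_opt = '' %}"
        "{% else %}{% set member_enabled_opt = ' disabled' %}{% endif %}"
        "{{ 'server %s %s:%d weight %s%s'|format("
        "member.id, member.address, member.protocol_port, member.weight,"
        "member_enabled_opt)|trim() }}")

    Member = collections.namedtuple(
        'Member', ['id', 'address', 'protocol_port', 'weight', 'enabled'])

    print(TEMPLATE.render(member=Member('m1', '10.0.0.99', 82, 13, True)))
    # server m1 10.0.0.99:82 weight 13
    print(TEMPLATE.render(member=Member('m2', '10.0.0.98', 82, 13, False)))
    # server m2 10.0.0.98:82 weight 13 disabled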
@@ -339,6 +339,26 @@ class TestHaproxyCfg(base.TestCase):
         self.assertEqual(sample_configs.sample_base_expected_config(
             backend=be), rendered_obj)
 
+    def test_render_template_disabled_member(self):
+        be = ("backend sample_pool_id_1\n"
+              "    mode http\n"
+              "    balance roundrobin\n"
+              "    cookie SRV insert indirect nocache\n"
+              "    fullconn 98\n"
+              "    option allbackups\n"
+              "    timeout connect 5000\n"
+              "    timeout server 50000\n"
+              "    server sample_member_id_1 10.0.0.99:82 weight 13 "
+              "cookie sample_member_id_1\n"
+              "    server sample_member_id_2 10.0.0.98:82 weight 13 "
+              "cookie sample_member_id_2 disabled\n\n")
+        rendered_obj = self.jinja_cfg.render_loadbalancer_obj(
+            sample_configs.sample_amphora_tuple(),
+            sample_configs.sample_listener_tuple(proto='HTTP', monitor=False,
+                                                 disabled_member=True))
+        self.assertEqual(sample_configs.sample_base_expected_config(
+            backend=be), rendered_obj)
+
     def test_render_template_ping_monitor_http(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
@@ -420,6 +420,7 @@ def sample_listener_tuple(proto=None, monitor=True, persistence=True,
                           l7=False, enabled=True, insert_headers=None,
                           be_proto=None, monitor_ip_port=False,
                           monitor_proto=None, backup_member=False,
+                          disabled_member=False,
                           timeout_client_data=50000,
                           timeout_member_connect=5000,
                           timeout_member_data=50000,
@@ -465,7 +466,7 @@ def sample_listener_tuple(proto=None, monitor=True, persistence=True,
             persistence_type=persistence_type,
             persistence_cookie=persistence_cookie,
             monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto,
-            backup_member=backup_member)]
+            backup_member=backup_member, disabled_member=disabled_member)]
         l7policies = []
     return in_listener(
         id='sample_listener_id_1',
@@ -541,7 +542,8 @@ def sample_tls_container_tuple(id='cont_id_1', certificate=None,
 def sample_pool_tuple(proto=None, monitor=True, persistence=True,
                       persistence_type=None, persistence_cookie=None,
                       sample_pool=1, monitor_ip_port=False,
-                      monitor_proto=None, backup_member=False):
+                      monitor_proto=None, backup_member=False,
+                      disabled_member=False):
     proto = 'HTTP' if proto is None else proto
     monitor_proto = proto if monitor_proto is None else monitor_proto
     in_pool = collections.namedtuple(
@@ -557,7 +559,8 @@ def sample_pool_tuple(proto=None, monitor=True, persistence=True,
                             monitor_ip_port=monitor_ip_port),
         sample_member_tuple('sample_member_id_2', '10.0.0.98',
                             monitor_ip_port=monitor_ip_port,
-                            backup=backup_member)]
+                            backup=backup_member,
+                            enabled=not disabled_member)]
     if monitor is True:
         mon = sample_health_monitor_tuple(proto=monitor_proto)
     elif sample_pool == 2:
@@ -96,6 +96,11 @@ class TestUpdateHealthDb(base.TestCase):
         mock_pool1 = mock.Mock()
         mock_pool1.id = "pool-id-1"
         mock_pool1.members = []
+        if health_monitor:
+            mock_hm1 = mock.Mock()
+            mock_pool1.health_monitor = mock_hm1
+        else:
+            mock_pool1.health_monitor = None
         mock_lb.pools = [mock_pool1]
         if mock_listener1:
             mock_listener1.pools = [mock_pool1]
@@ -125,7 +130,7 @@ class TestUpdateHealthDb(base.TestCase):
             "recv_time": time.time()
         }
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
 
@@ -243,7 +248,7 @@ class TestUpdateHealthDb(base.TestCase):
             "recv_time": time.time()
         }
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
         self.hm.update_health(health)
@@ -289,7 +294,7 @@ class TestUpdateHealthDb(base.TestCase):
             "recv_time": time.time()
         }
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
 
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
@@ -324,7 +329,7 @@ class TestUpdateHealthDb(base.TestCase):
             "recv_time": time.time()
         }
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
 
         mock_member2 = mock.Mock()
@@ -416,7 +421,7 @@ class TestUpdateHealthDb(base.TestCase):
             "recv_time": time.time()
         }
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
         self.hm.update_health(health)
@@ -447,7 +452,7 @@ class TestUpdateHealthDb(base.TestCase):
                     "members": {"member-id-1": constants.DRAIN}}}}},
             "recv_time": time.time()}
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
         self.hm.update_health(health)
@@ -487,7 +492,7 @@ class TestUpdateHealthDb(base.TestCase):
                     "members": {"member-id-1": constants.MAINT}}}}},
             "recv_time": time.time()}
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
         self.hm.update_health(health)
@@ -527,7 +532,7 @@ class TestUpdateHealthDb(base.TestCase):
                     "members": {"member-id-1": "blah"}}}}},
             "recv_time": time.time()}
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
         self.hm.update_health(health)
@@ -563,7 +568,7 @@ class TestUpdateHealthDb(base.TestCase):
             "recv_time": time.time()
         }
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
 
@@ -606,7 +611,7 @@ class TestUpdateHealthDb(base.TestCase):
             "recv_time": time.time()
         }
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
-            self._make_mock_lb_tree())
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
+            self._make_mock_lb_tree(health_monitor=False))
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
 
@@ -629,6 +634,48 @@ class TestUpdateHealthDb(base.TestCase):
 
         self.member_repo.update.assert_not_called()
 
+    def test_update_health_member_down_no_hm(self):
+
+        health = {
+            "id": self.FAKE_UUID_1,
+            "listeners": {
+                "listener-id-1": {"status": constants.OPEN, "pools": {
+                    "pool-id-1": {"status": constants.UP,
+                                  "members": {"member-id-1": constants.MAINT}
+                                  }
+                }
+                }
+            },
+            "recv_time": time.time()
+        }
+
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
+            self._make_mock_lb_tree(health_monitor=False))
+        mock_members[0].admin_state_up = False
+        mock_members[0].operating_status = constants.NO_MONITOR
+        self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
+
+        self.hm.update_health(health)
+        self.assertTrue(self.amphora_health_repo.replace.called)
+
+        # test listener, member
+        for listener_id, listener in six.iteritems(
+                health.get('listeners', {})):
+
+            self.listener_repo.update.assert_any_call(
+                self.session_mock, listener_id,
+                operating_status=constants.ONLINE)
+
+            for pool_id, pool in six.iteritems(listener.get('pools', {})):
+
+                self.hm.pool_repo.update.assert_any_call(
+                    self.session_mock, pool_id,
+                    operating_status=constants.ONLINE)
+
+        self.member_repo.update.assert_any_call(
+            self.session_mock, mock_members[0].id,
+            operating_status=constants.OFFLINE)
+
     def test_update_health_member_no_check(self):
 
         health = {
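The key assertion in the new test above is the last one: a member that
haproxy reports as "maint" ends up OFFLINE even when the pool has no health
monitor, whereas previously an admin-downed member was absent from the config
and stayed stuck in NO_MONITOR. Roughly (status names mirror
octavia.common.constants, but the function itself is invented for
illustration):

    MAINT = 'MAINT'
    OFFLINE = 'OFFLINE'

    def expected_member_status(reported_status):
        # Disabled members are now present in the haproxy config, so they
        # show up in health messages as "maint" and the ordinary health
        # pipeline can mark them OFFLINE.
        if reported_status == MAINT:
            return OFFLINE
        return reported_status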
@@ -645,7 +692,7 @@ class TestUpdateHealthDb(base.TestCase):
             "recv_time": time.time()
         }
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
         self.hm.update_health(health)
@@ -731,7 +778,7 @@ class TestUpdateHealthDb(base.TestCase):
             "recv_time": time.time()
         }
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
         self.hm.update_health(health)
@@ -782,7 +829,7 @@ class TestUpdateHealthDb(base.TestCase):
             "recv_time": time.time()
         }
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
 
@@ -924,7 +971,7 @@ class TestUpdateHealthDb(base.TestCase):
         self.hm.loadbalancer_repo.update.side_effect = (
             [sqlalchemy.orm.exc.NoResultFound])
 
-        mock_lb, mock_listener1, mock_pool1, mock_member1 = (
+        mock_lb, mock_listener1, mock_pool1, mock_members = (
             self._make_mock_lb_tree())
         self.amphora_repo.get_all_lbs_on_amphora.return_value = [mock_lb]
 
@@ -0,0 +1,12 @@
+---
+fixes:
+  - |
+    Creating a member on a pool with no healthmonitor would sometimes briefly
+    update their operating status from `NO_MONITOR` to `OFFLINE` and back to
+    `NO_MONITOR` during the provisioning sequence. This flapping will no longer
+    occur.
+  - |
+    Members that are disabled via `admin_state_up=False` are now rendered in
+    the HAProxy configuration on the amphora as `disabled`. Previously they
+    were not rendered at all. This means that disabled members will now
+    appear in health messages, and will properly change status to OFFLINE.