Remove user_group option

In Pike[1], we introduced user_group auto-detection for haproxy.
The default user group name is auto-detected for every OS distribution
we support as a base for Amphorae.

user_group remained available as an option for admins, but was
marked deprecated in Pike[2].

This patch removes that option altogether.

Story: 2003323
Task: 24357

[1] Ia8fede9d7da4709a48661d1fc595a16d04fcbfa9
[2] https://review.openstack.org/#/c/429398/45/octavia/common/config.py@175

Change-Id: Iddd4162674f116705d2b47062cbf7ca88f2677a6
Nir Magnezi 2018-08-07 12:07:31 +03:00
parent a166c89b64
commit 100858fa79
13 changed files with 11 additions and 40 deletions
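
For context, the auto-detection introduced in [1] selects whichever haproxy group actually exists on the amphora's base distribution, so a statically configured user_group is no longer needed. The snippet below is a minimal, hypothetical sketch of that idea, not the code from [1]; the names KNOWN_GROUPS and detect_haproxy_user_group are illustrative assumptions.

# Hypothetical sketch only -- not the detection code added in [1].
# KNOWN_GROUPS and detect_haproxy_user_group are illustrative names.
import grp

# Groups the haproxy package commonly uses on the distributions supported
# as amphora bases (assumed list, for illustration only).
KNOWN_GROUPS = ('haproxy', 'nogroup')


def detect_haproxy_user_group(default='nogroup'):
    """Return the first group in KNOWN_GROUPS that exists on this host."""
    for name in KNOWN_GROUPS:
        try:
            grp.getgrnam(name)  # raises KeyError when the group is absent
            return name
        except KeyError:
            continue
    return default

With detection along these lines running on the amphora, the diffs below simply delete every remaining reference to the old option.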

@@ -174,9 +174,6 @@
# build_active_retries = 300
# build_retry_interval = 5
# This setting is deprecated. It is now automatically discovered.
# user_group =
# Maximum number of entries that can fit in the stick table.
# The size supports "k", "m", "g" suffixes.
# haproxy_stick_size = 10k

@@ -49,9 +49,6 @@ class AgentJinjaTemplater(object):
'base_path': CONF.haproxy_amphora.base_path,
'bind_host': CONF.haproxy_amphora.bind_host,
'bind_port': CONF.haproxy_amphora.bind_port,
# TODO(nmagnezi): user_group is Deprecated in Pike,
# should be Removed in R cycle.
'user_group': CONF.haproxy_amphora.user_group,
'controller_list': CONF.health_manager.controller_ip_port_list,
'debug': CONF.debug,
'haproxy_cmd': CONF.haproxy_amphora.haproxy_cmd,

@@ -24,7 +24,6 @@ haproxy_cmd = {{ haproxy_cmd }}
respawn_count = {{ respawn_count }}
respawn_interval = {{ respawn_interval }}
use_upstart = {{ use_upstart }}
user_group = {{ user_group }}
[health_manager]
controller_ip_port_list = {{ controller_list|join(', ') }}

@@ -102,8 +102,7 @@ class HaproxyAmphoraLoadBalancerDriver(
config = self.jinja.build_config(
host_amphora=amp,
listener=listener,
tls_cert=certs['tls_cert'],
user_group=CONF.haproxy_amphora.user_group)
tls_cert=certs['tls_cert'])
self.client.upload_config(amp, listener.id, config,
timeout_dict=timeout_dict)
self.client.reload_listener(amp, listener.id,
@@ -140,8 +139,7 @@ class HaproxyAmphoraLoadBalancerDriver(
config = self.jinja.build_config(
host_amphora=amp,
listener=listener,
tls_cert=certs['tls_cert'],
user_group=CONF.haproxy_amphora.user_group)
tls_cert=certs['tls_cert'])
self.client.upload_config(amp, listener.id, config)
self.client.reload_listener(amp, listener.id)

@@ -267,11 +267,6 @@ haproxy_amphora_opts = [
default=5,
help=_('Retry timeout between build attempts in '
'seconds.')),
cfg.StrOpt('user_group', deprecated_for_removal=True,
deprecated_reason='This is now automatically discovered '
' and configured.',
help=_('The user group for haproxy to run under inside the '
'amphora.')),
cfg.StrOpt('haproxy_stick_size', default='10k',
help=_('Size of the HAProxy stick table. Accepts k, m, g '
'suffixes. Example: 10k')),

@@ -78,20 +78,17 @@ class JinjaTemplater(object):
self.connection_logging = connection_logging
def build_config(self, host_amphora, listener, tls_cert,
socket_path=None,
user_group='nogroup'):
socket_path=None):
"""Convert a logical configuration to the HAProxy version
:param host_amphora: The Amphora this configuration is hosted on
:param listener: The listener configuration
:param tls_cert: The TLS certificates for the listener
:param socket_path: The socket path for Haproxy process
:param user_group: The user group
:return: Rendered configuration
"""
return self.render_loadbalancer_obj(host_amphora, listener,
tls_cert=tls_cert,
user_group=user_group,
socket_path=socket_path)
def _get_template(self):
@@ -110,7 +107,6 @@ class JinjaTemplater(object):
def render_loadbalancer_obj(self, host_amphora, listener,
tls_cert=None,
user_group='nogroup',
socket_path=None):
"""Renders a templated configuration from a load balancer object
@@ -118,7 +114,6 @@
:param listener: The listener configuration
:param tls_cert: The TLS certificates for the listener
:param socket_path: The socket path for Haproxy process
:param user_group: The user group
:return: Rendered configuration
"""
loadbalancer = self._transform_loadbalancer(
@@ -130,7 +125,6 @@
socket_path = '%s/%s.sock' % (self.base_amp_path, listener.id)
return self._get_template().render(
{'loadbalancer': loadbalancer,
'user_group': user_group,
'stats_sock': socket_path,
'log_http': self.log_http,
'log_server': self.log_server,

@@ -17,7 +17,6 @@
global
daemon
user nobody
group {{ usergroup }}
log {{ log_http | default('/dev/log', true)}} local0
log {{ log_server | default('/dev/log', true)}} local1 notice
stats socket {{ sock_path }} mode 0666 level user

@@ -21,7 +21,6 @@
{% set loadbalancer_id = loadbalancer.id %}
{% set usergroup = user_group %}
{% set sock_path = stats_sock %}

@@ -28,7 +28,6 @@ class HAProxyCompatTestCase(base.TestCase):
"global\n"
" daemon\n"
" user nobody\n"
" group nogroup\n"
" log /dev/log local0\n"
" log /dev/log local1 notice\n"
" stats socket /var/lib/octavia/sample_listener_id_1.sock"

@@ -39,7 +39,6 @@ class AgentJinjaTestCase(base.TestCase):
self.conf.config(group="haproxy_amphora",
base_cert_dir='/var/lib/octavia/certs')
self.conf.config(group="haproxy_amphora", use_upstart='True')
self.conf.config(group="haproxy_amphora", user_group='nogroup')
self.conf.config(group="haproxy_amphora", base_path='/var/lib/octavia')
self.conf.config(group="haproxy_amphora", bind_host='0.0.0.0')
self.conf.config(group="haproxy_amphora", bind_port=9443)
@@ -67,8 +66,7 @@ class AgentJinjaTestCase(base.TestCase):
'haproxy_cmd = /usr/sbin/haproxy\n'
'respawn_count = 2\n'
'respawn_interval = 2\n'
'use_upstart = True\n'
'user_group = nogroup\n\n'
'use_upstart = True\n\n'
'[health_manager]\n'
'controller_ip_port_list = 192.0.2.10:5555\n'
'heartbeat_interval = 10\n'
@@ -101,8 +99,7 @@ class AgentJinjaTestCase(base.TestCase):
'haproxy_cmd = /usr/sbin/haproxy\n'
'respawn_count = 2\n'
'respawn_interval = 2\n'
'use_upstart = False\n'
'user_group = nogroup\n\n'
'use_upstart = False\n\n'
'[health_manager]\n'
'controller_ip_port_list = 192.0.2.10:5555\n'
'heartbeat_interval = 10\n'
@@ -138,8 +135,7 @@ class AgentJinjaTestCase(base.TestCase):
'haproxy_cmd = /usr/sbin/haproxy\n'
'respawn_count = 2\n'
'respawn_interval = 2\n'
'use_upstart = True\n'
'user_group = nogroup\n\n'
'use_upstart = True\n\n'
'[health_manager]\n'
'controller_ip_port_list = 192.0.2.10:5555\n'
'heartbeat_interval = 10\n'

@@ -50,9 +50,6 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
def setUp(self):
super(TestHaproxyAmphoraLoadBalancerDriverTest, self).setUp()
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="haproxy_amphora", user_group="everyone")
DEST1 = '198.51.100.0/24'
DEST2 = '203.0.113.0/24'
NEXTHOP = '192.0.2.1'
@@ -120,9 +117,6 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
self.driver.update_amphora_listeners([mock_listener], 0,
[mock_amphora], self.timeout_dict)
self.driver.jinja.build_config.assert_called_once_with(
host_amphora=mock_amphora, listener=mock_listener,
tls_cert=None, user_group="everyone")
self.driver.client.upload_config.assert_called_once_with(
mock_amphora, mock_listener.id, 'the_config',
timeout_dict=self.timeout_dict)

@@ -868,7 +868,6 @@ def sample_base_expected_config(frontend=None, backend=None,
"global\n"
" daemon\n"
" user nobody\n"
" group nogroup\n"
" log /dev/log local0\n"
" log /dev/log local1 notice\n"
" stats socket /var/lib/octavia/sample_listener_id_1.sock"

@@ -0,0 +1,5 @@
---
deprecations:
- |
    The user_group option, which was deprecated in Pike, has now been
    completely removed.