Merge "Add option to set listener timeouts for lb created by Kuryr"
This commit is contained in:
commit
e21d2f3d8b
|
@ -65,6 +65,9 @@ VAR_RUN_PATH=/var/run
|
|||
# KURYR_K8S_OCTAVIA_MEMBER_MODE=L2
|
||||
# KURYR_ENFORCE_SG_RULES=False
|
||||
# KURYR_LB_ALGORITHM=SOURCE_IP_PORT
|
||||
# Uncomment to modify listener client and member inactivity timeout.
|
||||
# KURYR_TIMEOUT_CLIENT_DATA=50000
|
||||
# KURYR_TIMEOUT_MEMBER_DATA=50000
|
||||
|
||||
|
||||
# Octavia LBaaSv2
|
||||
|
|
|
@ -41,9 +41,15 @@ enable_service neutron-tag-ports-during-bulk-creation
|
|||
# VAR_RUN_PATH=/var/run
|
||||
|
||||
# OCTAVIA
|
||||
# =======
|
||||
# Uncomment it to use L2 communication between loadbalancer and member pods
|
||||
# KURYR_K8S_OCTAVIA_MEMBER_MODE=L2
|
||||
|
||||
# Uncomment to change Octavia loadbalancer listener client and member
|
||||
# inactivity timeout from 50000ms.
|
||||
# KURYR_TIMEOUT_CLIENT_DATA=50000
|
||||
# KURYR_TIMEOUT_MEMBER_DATA=50000
|
||||
|
||||
# Octavia LBaaSv2
|
||||
LIBS_FROM_GIT+=python-octaviaclient
|
||||
enable_plugin octavia https://opendev.org/openstack/octavia
|
||||
|
|
|
@ -498,6 +498,8 @@ function configure_neutron_defaults {
|
|||
iniset "$KURYR_CONFIG" octavia_defaults member_mode "$KURYR_K8S_OCTAVIA_MEMBER_MODE"
|
||||
iniset "$KURYR_CONFIG" octavia_defaults enforce_sg_rules "$KURYR_ENFORCE_SG_RULES"
|
||||
iniset "$KURYR_CONFIG" octavia_defaults lb_algorithm "$KURYR_LB_ALGORITHM"
|
||||
iniset "$KURYR_CONFIG" octavia_defaults timeout_client_data "$KURYR_TIMEOUT_CLIENT_DATA"
|
||||
iniset "$KURYR_CONFIG" octavia_defaults timeout_member_data "$KURYR_TIMEOUT_MEMBER_DATA"
|
||||
# Octavia takes a very long time to start the LB in the gate. We need
|
||||
# to tweak the timeout for the LB creation. Let's be generous and give
|
||||
# it up to 20 minutes.
|
||||
|
|
|
@ -60,6 +60,8 @@ OPENSHIFT_CNI_BINARY_URL=${OPENSHIFT_CNI_BINARY_URL:-https://github.com/containe
|
|||
KURYR_K8S_OCTAVIA_MEMBER_MODE=${KURYR_K8S_OCTAVIA_MEMBER_MODE:-L3}
|
||||
KURYR_ENFORCE_SG_RULES=${KURYR_ENFORCE_SG_RULES:-True}
|
||||
KURYR_LB_ALGORITHM=${KURYR_LB_ALGORITHM:-ROUND_ROBIN}
|
||||
KURYR_TIMEOUT_CLIENT_DATA=${KURYR_TIMEOUT_CLIENT_DATA:-0}
|
||||
KURYR_TIMEOUT_MEMBER_DATA=${KURYR_TIMEOUT_MEMBER_DATA:-0}
|
||||
|
||||
# Kuryr_ovs_baremetal
|
||||
KURYR_CONFIGURE_BAREMETAL_KUBELET_IFACE=${KURYR_CONFIGURE_BAREMETAL_KUBELET_IFACE:-True}
|
||||
|
|
|
@ -47,3 +47,4 @@ This section describes how you can install and configure kuryr-kubernetes
|
|||
testing_udp_services
|
||||
testing_sriov_functional
|
||||
testing_sctp_services
|
||||
listener_timeouts
|
||||
|
|
|
@ -0,0 +1,68 @@
|
|||
=====================================================
|
||||
How to configure Listener timeouts for Load Balancers
|
||||
=====================================================
|
||||
|
||||
By default, Kuryr uses the default Octavia timeout-client-data and
|
||||
timeout-member-data values when creating or modifying loadbalancers.
|
||||
To change the timeout values used in creating or modifying a particular
|
||||
service:
|
||||
|
||||
Set the new timeout values for openstack.org/kuryr-timeout-client-data and
|
||||
openstack.org/kuryr-timeout-member-data in the service annotation as seen in
|
||||
the service manifest below. This specification sets the timeout-client-data
|
||||
and the timeout-member-data to '70000' and '75000' respectively.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kuryr-demo
|
||||
annotations:
|
||||
openstack.org/kuryr-timeout-client-data: '70000'
|
||||
openstack.org/kuryr-timeout-member-data: '75000'
|
||||
spec:
|
||||
selector:
|
||||
app: server
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
|
||||
.. note::
|
||||
|
||||
The listener timeout values can be reset to the defaults by removing the
|
||||
  service annotations for the timeout values.
|
||||
|
||||
Setting the timeouts via ConfigMap
|
||||
----------------------------------
|
||||
|
||||
Alternatively, you can change the value of the timeout-client-data and/or the
|
||||
timeout-member-data on the Kuryr ConfigMap. This option is ideal if the new
|
||||
timeout values will be used for multiple loadbalancers. On DevStack deployment,
|
||||
the Kuryr ConfigMap can be edited using:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ kubectl -n kube-system edit cm kuryr-config
|
||||
|
||||
The listener timeouts then need to be changed in the ConfigMap:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
[octavia_defaults]
|
||||
timeout_member_data = 0
|
||||
timeout_client_data = 0
|
||||
|
||||
Another option is to set the listener timeouts in the local.conf file.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
#KURYR_TIMEOUT_CLIENT_DATA=0
|
||||
#KURYR_TIMEOUT_MEMBER_DATA=0
|
||||
|
||||
.. note::
|
||||
|
||||
  The listener timeout values set via the ConfigMap or at the local.conf
|
||||
can be overridden by values set in the service annotations for a particular
|
||||
service.
|
|
@ -110,6 +110,10 @@ spec:
|
|||
type: string
|
||||
provider:
|
||||
type: string
|
||||
timeout_client_data:
|
||||
type: integer
|
||||
timeout_member_data:
|
||||
type: integer
|
||||
status:
|
||||
type: object
|
||||
properties:
|
||||
|
@ -137,6 +141,10 @@ spec:
|
|||
type: string
|
||||
protocol:
|
||||
type: string
|
||||
timeout_client_data:
|
||||
type: integer
|
||||
timeout_member_data:
|
||||
type: integer
|
||||
loadbalancer:
|
||||
type: object
|
||||
required:
|
||||
|
|
|
@ -259,6 +259,16 @@ octavia_defaults = [
|
|||
"to the pool members. The options are: ROUND_ROBIN, "
|
||||
"LEAST_CONNECTIONS, SOURCE_IP and SOURCE_IP_PORT."),
|
||||
default='ROUND_ROBIN'),
|
||||
cfg.IntOpt('timeout_client_data',
|
||||
help=_("Frontend client inactivity timeout in milliseconds. "
|
||||
"When set to 0, the default timeout value set by "
|
||||
"Octavia is used."),
|
||||
default=0),
|
||||
cfg.IntOpt('timeout_member_data',
|
||||
help=_("Backend member inactivity timeout in milliseconds. "
|
||||
"When set to 0, the default timeout value set by "
|
||||
"Octavia is used."),
|
||||
default=0),
|
||||
]
|
||||
|
||||
cache_defaults = [
|
||||
|
|
|
@ -60,6 +60,9 @@ K8S_ANNOTATION_NET_CRD = K8S_ANNOTATION_PREFIX + '-net-crd'
|
|||
K8S_ANNOTATION_NETPOLICY_CRD = K8S_ANNOTATION_PREFIX + '-netpolicy-crd'
|
||||
K8S_ANNOTATION_POLICY = K8S_ANNOTATION_PREFIX + '-counter'
|
||||
|
||||
K8S_ANNOTATION_CLIENT_TIMEOUT = K8S_ANNOTATION_PREFIX + '-timeout-client-data'
|
||||
K8S_ANNOTATION_MEMBER_TIMEOUT = K8S_ANNOTATION_PREFIX + '-timeout-member-data'
|
||||
|
||||
K8S_ANNOTATION_NPWG_PREFIX = 'k8s.v1.cni.cncf.io'
|
||||
K8S_ANNOTATION_NPWG_NETWORK = K8S_ANNOTATION_NPWG_PREFIX + '/networks'
|
||||
K8S_ANNOTATION_NPWG_CRD_SUBNET_ID = 'subnetId'
|
||||
|
|
|
@ -357,7 +357,8 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
|||
'network-policy' not in rule.get('description'))
|
||||
|
||||
def ensure_listener(self, loadbalancer, protocol, port,
|
||||
service_type='ClusterIP'):
|
||||
service_type='ClusterIP', timeout_client_data=0,
|
||||
timeout_member_data=0):
|
||||
name = "%s:%s:%s" % (loadbalancer['name'], protocol, port)
|
||||
listener = {
|
||||
'name': name,
|
||||
|
@ -366,6 +367,10 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
|||
'protocol': protocol,
|
||||
'port': port
|
||||
}
|
||||
if timeout_client_data:
|
||||
listener['timeout_client_data'] = timeout_client_data
|
||||
if timeout_member_data:
|
||||
listener['timeout_member_data'] = timeout_member_data
|
||||
try:
|
||||
result = self._ensure_provisioned(
|
||||
loadbalancer, listener, self._create_listener,
|
||||
|
@ -552,10 +557,18 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
|||
'protocol': listener['protocol'],
|
||||
'protocol_port': listener['port'],
|
||||
}
|
||||
timeout_cli = listener.get('timeout_client_data')
|
||||
timeout_mem = listener.get('timeout_member_data')
|
||||
if timeout_cli:
|
||||
request['timeout_client_data'] = timeout_cli
|
||||
if timeout_mem:
|
||||
request['timeout_member_data'] = timeout_mem
|
||||
self.add_tags('listener', request)
|
||||
lbaas = clients.get_loadbalancer_client()
|
||||
response = lbaas.create_listener(**request)
|
||||
listener['id'] = response.id
|
||||
listener['timeout_client_data'] = response.timeout_client_data
|
||||
listener['timeout_member_data'] = response.timeout_member_data
|
||||
return listener
|
||||
|
||||
def _update_listener_acls(self, loadbalancer, listener_id, allowed_cidrs):
|
||||
|
@ -589,6 +602,8 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
|||
|
||||
def _find_listener(self, listener, loadbalancer):
|
||||
lbaas = clients.get_loadbalancer_client()
|
||||
timeout_cli = listener.get('timeout_client_data')
|
||||
timeout_mb = listener.get('timeout_member_data')
|
||||
response = lbaas.listeners(
|
||||
name=listener['name'],
|
||||
project_id=listener['project_id'],
|
||||
|
@ -596,16 +611,33 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
|||
protocol=listener['protocol'],
|
||||
protocol_port=listener['port'])
|
||||
|
||||
request = {}
|
||||
request['timeout_client_data'] = timeout_cli
|
||||
request['timeout_member_data'] = timeout_mb
|
||||
try:
|
||||
os_listener = next(response)
|
||||
listener['id'] = os_listener.id
|
||||
n_listen = None
|
||||
if os_listener.provisioning_status == 'ERROR':
|
||||
LOG.debug("Releasing listener %s", os_listener.id)
|
||||
self.release_listener(loadbalancer, listener)
|
||||
return None
|
||||
if (timeout_cli and (
|
||||
os_listener.timeout_client_data != timeout_cli)) or (
|
||||
timeout_mb and (
|
||||
os_listener.timeout_member_data != timeout_mb)):
|
||||
n_listen = lbaas.update_listener(os_listener.id, **request)
|
||||
elif not timeout_cli or not timeout_mb:
|
||||
n_listen = lbaas.update_listener(os_listener.id, **request)
|
||||
if n_listen:
|
||||
listener['timeout_client_data'] = n_listen.timeout_client_data
|
||||
listener['timeout_member_data'] = n_listen.timeout_member_data
|
||||
|
||||
except (KeyError, StopIteration):
|
||||
return None
|
||||
|
||||
except os_exc.SDKException:
|
||||
LOG.error('Error when updating listener %s' % listener['id'])
|
||||
raise k_exc.ResourceNotReady(listener['id'])
|
||||
return listener
|
||||
|
||||
def _create_pool(self, pool):
|
||||
|
|
|
@ -182,6 +182,25 @@ class ServiceHandler(k8s_base.ResourceEventHandler):
|
|||
LOG.exception('Error updating kuryrnet CRD %s', loadbalancer_crd)
|
||||
raise
|
||||
|
||||
def _get_data_timeout_annotation(self, service):
    """Return the (client, member) listener data timeouts for *service*.

    The values are read from the kuryr timeout annotations on the
    Service when present; any annotation that is missing falls back to
    the corresponding ``[octavia_defaults]`` configuration option.
    A present-but-non-numeric annotation raises ValueError, matching
    the original int() conversion behaviour.
    """
    defaults = config.CONF.octavia_defaults
    annotations = service.get('metadata', {}).get('annotations')
    if not annotations:
        # No annotations on the Service at all: use configured defaults.
        return defaults.timeout_client_data, defaults.timeout_member_data

    def _from_annotation(key, fallback):
        # Missing key -> fallback; present key is converted to int.
        raw = annotations.get(key)
        return fallback if raw is None else int(raw)

    return (_from_annotation(k_const.K8S_ANNOTATION_CLIENT_TIMEOUT,
                             defaults.timeout_client_data),
            _from_annotation(k_const.K8S_ANNOTATION_MEMBER_TIMEOUT,
                             defaults.timeout_member_data))
|
||||
|
||||
def _build_kuryrloadbalancer_spec(self, service):
|
||||
svc_ip = self._get_service_ip(service)
|
||||
spec_lb_ip = service['spec'].get('loadBalancerIP')
|
||||
|
@ -193,7 +212,6 @@ class ServiceHandler(k8s_base.ResourceEventHandler):
|
|||
sg_ids = self._drv_sg.get_security_groups(service, project_id)
|
||||
subnet_id = self._get_subnet_id(service, project_id, svc_ip)
|
||||
spec_type = service['spec'].get('type')
|
||||
|
||||
spec = {
|
||||
'ip': svc_ip,
|
||||
'ports': ports,
|
||||
|
@ -205,11 +223,15 @@ class ServiceHandler(k8s_base.ResourceEventHandler):
|
|||
|
||||
if spec_lb_ip is not None:
|
||||
spec['lb_ip'] = spec_lb_ip
|
||||
timeout_cli, timeout_mem = self._get_data_timeout_annotation(service)
|
||||
spec['timeout_client_data'] = timeout_cli
|
||||
spec['timeout_member_data'] = timeout_mem
|
||||
return spec
|
||||
|
||||
def _has_lbaas_spec_changes(self, service, loadbalancer_crd):
|
||||
return (self._has_ip_changes(service, loadbalancer_crd) or
|
||||
utils.has_port_changes(service, loadbalancer_crd))
|
||||
utils.has_port_changes(service, loadbalancer_crd) or
|
||||
self._has_timeout_changes(service, loadbalancer_crd))
|
||||
|
||||
def _has_ip_changes(self, service, loadbalancer_crd):
|
||||
link = utils.get_res_link(service)
|
||||
|
@ -229,6 +251,22 @@ class ServiceHandler(k8s_base.ResourceEventHandler):
|
|||
|
||||
return False
|
||||
|
||||
def _has_timeout_changes(self, service, loadbalancer_crd):
    """Report whether the Service's timeout annotations diverge from the CRD.

    Compares the annotated (or default) client/member data timeouts
    against the values stored in the KuryrLoadBalancer CRD spec.
    Returns True when any pair differs (treating two falsy values as
    equal), False otherwise.
    """
    link = utils.get_res_link(service)
    desired = self._get_data_timeout_annotation(service)
    spec = loadbalancer_crd['spec']
    recorded = (spec.get('timeout_client_data'),
                spec.get('timeout_member_data'))

    for in_spec, wanted in zip(recorded, desired):
        # Both unset/zero means "use Octavia defaults" on both sides.
        if not in_spec and not wanted:
            continue
        if in_spec != wanted:
            LOG.debug("LBaaS spec listener timeout {} != {} for {}".format(
                in_spec, wanted, link))
            return True

    return False
|
||||
|
||||
|
||||
class EndpointsHandler(k8s_base.ResourceEventHandler):
|
||||
"""EndpointsHandler handles K8s Endpoints events.
|
||||
|
|
|
@ -575,6 +575,8 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
|
|||
def _add_new_listeners(self, loadbalancer_crd):
|
||||
changed = False
|
||||
lb_crd_spec_ports = loadbalancer_crd['spec'].get('ports')
|
||||
spec_t_cli = loadbalancer_crd['spec'].get('timeout_client_data')
|
||||
spec_t_mb = loadbalancer_crd['spec'].get('timeout_member_data')
|
||||
if not lb_crd_spec_ports:
|
||||
return changed
|
||||
lbaas_spec_ports = sorted(lb_crd_spec_ports,
|
||||
|
@ -585,9 +587,13 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
|
|||
name = "%s:%s" % (loadbalancer_crd['status']['loadbalancer'][
|
||||
'name'], protocol)
|
||||
|
||||
listener = [l for l in loadbalancer_crd['status'].get(
|
||||
'listeners', []) if l['port'] == port and l[
|
||||
'protocol'] == protocol]
|
||||
listener = []
|
||||
for l in loadbalancer_crd['status'].get('listeners', []):
|
||||
timeout_cli = l.get('timeout_client_data')
|
||||
timeout_mb = l.get('timeout_member_data')
|
||||
if l['port'] == port and l['protocol'] == protocol:
|
||||
if timeout_cli == spec_t_cli and timeout_mb == spec_t_mb:
|
||||
listener.append(l)
|
||||
|
||||
if listener:
|
||||
continue
|
||||
|
@ -610,10 +616,16 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
|
|||
loadbalancer=loadbalancer_crd['status'].get('loadbalancer'),
|
||||
protocol=protocol,
|
||||
port=port,
|
||||
service_type=loadbalancer_crd['spec'].get('type'))
|
||||
service_type=loadbalancer_crd['spec'].get('type'),
|
||||
timeout_client_data=spec_t_cli,
|
||||
timeout_member_data=spec_t_mb)
|
||||
|
||||
if listener is not None:
|
||||
listeners = loadbalancer_crd['status'].get('listeners', [])
|
||||
if listeners:
|
||||
for pre_listener in listeners:
|
||||
if pre_listener['id'] == listener['id']:
|
||||
listeners.remove(pre_listener)
|
||||
listeners.append(listener)
|
||||
else:
|
||||
loadbalancer_crd['status']['listeners'] = []
|
||||
|
|
|
@ -195,8 +195,7 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
|||
# TODO(ivc): handle security groups
|
||||
m_driver._ensure_provisioned.return_value = expected_resp
|
||||
|
||||
resp = cls.ensure_listener(m_driver, loadbalancer,
|
||||
protocol, port)
|
||||
resp = cls.ensure_listener(m_driver, loadbalancer, protocol, port)
|
||||
|
||||
m_driver._ensure_provisioned.assert_called_once_with(
|
||||
loadbalancer, mock.ANY, m_driver._create_listener,
|
||||
|
@ -578,6 +577,36 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
|||
self.assertEqual(listener, ret)
|
||||
self.assertEqual(listener_id, ret['id'])
|
||||
|
||||
def test_create_listener_with_different_timeouts(self):
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
listener = {
|
||||
'name': 'TEST_NAME',
|
||||
'project_id': 'TEST_PROJECT',
|
||||
'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
|
||||
'protocol': 'TCP',
|
||||
'port': 5678,
|
||||
'timeout_client_data': 75000,
|
||||
'timeout_member_data': 0
|
||||
}
|
||||
listener_id = 'A57B7771-6050-4CA8-A63C-443493EC98AB'
|
||||
|
||||
req = {
|
||||
'name': listener['name'],
|
||||
'project_id': listener['project_id'],
|
||||
'loadbalancer_id': listener['loadbalancer_id'],
|
||||
'protocol': listener['protocol'],
|
||||
'protocol_port': listener['port'],
|
||||
'timeout_client_data': listener['timeout_client_data']}
|
||||
resp = o_lis.Listener(id=listener_id)
|
||||
lbaas.create_listener.return_value = resp
|
||||
|
||||
ret = cls._create_listener(m_driver, listener)
|
||||
lbaas.create_listener.assert_called_once_with(**req)
|
||||
self.assertEqual(listener, ret)
|
||||
self.assertEqual(listener_id, ret['id'])
|
||||
|
||||
def test_find_listener(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
|
|
|
@ -226,11 +226,15 @@ class TestServiceHandler(test_base.TestCase):
|
|||
|
||||
for has_ip_changes in (True, False):
|
||||
for has_port_changes in (True, False):
|
||||
m_handler._has_ip_changes.return_value = has_ip_changes
|
||||
m_port_changes.return_value = has_port_changes
|
||||
ret = h_lbaas.ServiceHandler._has_lbaas_spec_changes(
|
||||
m_handler, service, lbaas_spec)
|
||||
self.assertEqual(has_ip_changes or has_port_changes, ret)
|
||||
for has_timeout_ in (True, False):
|
||||
m_handler._has_ip_changes.return_value = has_ip_changes
|
||||
m_port_changes.return_value = has_port_changes
|
||||
m_handler._has_timeout_changes.return_value = has_timeout_
|
||||
ret = h_lbaas.ServiceHandler._has_lbaas_spec_changes(
|
||||
m_handler, service, lbaas_spec)
|
||||
self.assertEqual(
|
||||
has_ip_changes or has_port_changes or has_timeout_,
|
||||
ret)
|
||||
|
||||
def test_has_ip_changes(self):
|
||||
m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
|
||||
|
|
|
@ -0,0 +1,10 @@
|
|||
---
|
||||
features:
|
||||
- |
|
||||
Kuryr will now support the configuration of Octavia listener timeouts.
|
||||
The timeout-client-data and timeout-member-data settings of listeners
|
||||
can be configured to use values other than the Octavia defaults when
|
||||
creating or modifying loadbalancers handled by Kuryr. In order to use this
|
||||
functionality, the new timeout values can be annotated to the Service or
|
||||
set in kuryr.conf. New options ``[octavia_defaults]timeout_client_data``
|
||||
and ``[octavia_defaults]timeout_member_data`` were added.
|
Loading…
Reference in New Issue