Add option to set listener timeouts for lb created by Kuryr
The timeout-client-data and timeout-member-data configuration options for Octavia listeners default to 50 seconds for load balancers created by Kuryr. This patch allows the creation and modification of load balancers handled by Kuryr with different timeout values. Implements: blueprint configure-lb-listeners-timeout Change-Id: I99016001c2263023d1fa2637d7b5aeb23b3b2d9d
This commit is contained in:
parent
10ea869858
commit
9b3c45d84a
|
@ -65,6 +65,9 @@ VAR_RUN_PATH=/var/run
|
|||
# KURYR_K8S_OCTAVIA_MEMBER_MODE=L2
|
||||
# KURYR_ENFORCE_SG_RULES=False
|
||||
# KURYR_LB_ALGORITHM=SOURCE_IP_PORT
|
||||
# Uncomment to modify listener client and member inactivity timeout.
|
||||
# KURYR_TIMEOUT_CLIENT_DATA=50000
|
||||
# KURYR_TIMEOUT_MEMBER_DATA=50000
|
||||
|
||||
|
||||
# Octavia LBaaSv2
|
||||
|
|
|
@ -41,9 +41,15 @@ enable_service neutron-tag-ports-during-bulk-creation
|
|||
# VAR_RUN_PATH=/var/run
|
||||
|
||||
# OCTAVIA
|
||||
# =======
|
||||
# Uncomment it to use L2 communication between loadbalancer and member pods
|
||||
# KURYR_K8S_OCTAVIA_MEMBER_MODE=L2
|
||||
|
||||
# Uncomment to modify Octavia loadbalancer listener client and member
|
||||
# inactivity timeout.
|
||||
# KURYR_TIMEOUT_CLIENT_DATA=50000
|
||||
# KURYR_TIMEOUT_MEMBER_DATA=50000
|
||||
|
||||
# Octavia LBaaSv2
|
||||
LIBS_FROM_GIT+=python-octaviaclient
|
||||
enable_plugin octavia https://opendev.org/openstack/octavia
|
||||
|
@ -233,7 +239,7 @@ enable_service kuryr-daemon
|
|||
# Uncomment the next line to force devstack to create a new router for kuryr
|
||||
# networks instead of using the default one being created by devstack
|
||||
# KURYR_NEUTRON_DEFAULT_ROUTER = kuryr-router
|
||||
|
||||
|
||||
# Increase Octavia amphorae timeout so that the first LB amphora has time to
|
||||
# build and boot
|
||||
IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
|
||||
|
|
|
@ -485,6 +485,8 @@ function configure_neutron_defaults {
|
|||
iniset "$KURYR_CONFIG" octavia_defaults member_mode "$KURYR_K8S_OCTAVIA_MEMBER_MODE"
|
||||
iniset "$KURYR_CONFIG" octavia_defaults enforce_sg_rules "$KURYR_ENFORCE_SG_RULES"
|
||||
iniset "$KURYR_CONFIG" octavia_defaults lb_algorithm "$KURYR_LB_ALGORITHM"
|
||||
iniset "$KURYR_CONFIG" octavia_defaults timeout_client_data "$KURYR_TIMEOUT_CLIENT_DATA"
|
||||
iniset "$KURYR_CONFIG" octavia_defaults timeout_member_data "$KURYR_TIMEOUT_MEMBER_DATA"
|
||||
# Octavia takes a very long time to start the LB in the gate. We need
|
||||
# to tweak the timeout for the LB creation. Let's be generous and give
|
||||
# it up to 20 minutes.
|
||||
|
|
|
@ -60,6 +60,8 @@ OPENSHIFT_CNI_BINARY_URL=${OPENSHIFT_CNI_BINARY_URL:-https://github.com/containe
|
|||
KURYR_K8S_OCTAVIA_MEMBER_MODE=${KURYR_K8S_OCTAVIA_MEMBER_MODE:-L3}
|
||||
KURYR_ENFORCE_SG_RULES=${KURYR_ENFORCE_SG_RULES:-True}
|
||||
KURYR_LB_ALGORITHM=${KURYR_LB_ALGORITHM:-ROUND_ROBIN}
|
||||
KURYR_TIMEOUT_CLIENT_DATA=${KURYR_TIMEOUT_CLIENT_DATA:-0}
|
||||
KURYR_TIMEOUT_MEMBER_DATA=${KURYR_TIMEOUT_MEMBER_DATA:-0}
|
||||
|
||||
# Kuryr_ovs_baremetal
|
||||
KURYR_CONFIGURE_BAREMETAL_KUBELET_IFACE=${KURYR_CONFIGURE_BAREMETAL_KUBELET_IFACE:-True}
|
||||
|
|
|
@ -110,6 +110,10 @@ spec:
|
|||
type: string
|
||||
provider:
|
||||
type: string
|
||||
timeout_client_data:
|
||||
type: integer
|
||||
timeout_member_data:
|
||||
type: integer
|
||||
status:
|
||||
type: object
|
||||
properties:
|
||||
|
|
|
@ -259,6 +259,14 @@ octavia_defaults = [
|
|||
"to the pool members. The options are: ROUND_ROBIN, "
|
||||
"LEAST_CONNECTIONS, SOURCE_IP and SOURCE_IP_PORT."),
|
||||
default='ROUND_ROBIN'),
|
||||
cfg.IntOpt('timeout_client_data',
|
||||
help=_("Frontend client inactivity timeout in milliseconds. "
|
||||
"Default: 50000."),
|
||||
default=0),
|
||||
cfg.IntOpt('timeout_member_data',
|
||||
help=_("Backend member inactivity timeout in milliseconds. "
|
||||
"Default: 50000."),
|
||||
default=0),
|
||||
]
|
||||
|
||||
cache_defaults = [
|
||||
|
|
|
@ -61,6 +61,9 @@ K8S_ANNOTATION_NET_CRD = K8S_ANNOTATION_PREFIX + '-net-crd'
|
|||
K8S_ANNOTATION_NETPOLICY_CRD = K8S_ANNOTATION_PREFIX + '-netpolicy-crd'
|
||||
K8S_ANNOTATION_POLICY = K8S_ANNOTATION_PREFIX + '-counter'
|
||||
|
||||
K8S_ANNOTATION_CLIENT_TIMEOUT = K8S_ANNOTATION_PREFIX + '-timeout-client-data'
|
||||
K8S_ANNOTATION_MEMBER_TIMEOUT = K8S_ANNOTATION_PREFIX + '-timeout-member-data'
|
||||
|
||||
K8S_ANNOTATION_NPWG_PREFIX = 'k8s.v1.cni.cncf.io'
|
||||
K8S_ANNOTATION_NPWG_NETWORK = K8S_ANNOTATION_NPWG_PREFIX + '/networks'
|
||||
K8S_ANNOTATION_NPWG_CRD_SUBNET_ID = 'subnetId'
|
||||
|
|
|
@ -364,7 +364,9 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
|||
'project_id': loadbalancer['project_id'],
|
||||
'loadbalancer_id': loadbalancer['id'],
|
||||
'protocol': protocol,
|
||||
'port': port
|
||||
'port': port,
|
||||
'timeout_client_data': loadbalancer['timeout_client_data'],
|
||||
'timeout_member_data': loadbalancer['timeout_member_data']
|
||||
}
|
||||
try:
|
||||
result = self._ensure_provisioned(
|
||||
|
@ -552,6 +554,16 @@ class LBaaSv2Driver(base.LBaaSDriver):
|
|||
'protocol': listener['protocol'],
|
||||
'protocol_port': listener['port'],
|
||||
}
|
||||
timeout_cli = CONF.octavia_defaults.timeout_client_data \
|
||||
if not listener['timeout_client_data'] \
|
||||
else listener['timeout_client_data']
|
||||
timeout_mem = listener['timeout_member_data'] \
|
||||
if listener['timeout_member_data'] \
|
||||
else CONF.octavia_defaults.timeout_member_data
|
||||
if timeout_cli:
|
||||
request['timeout_client_data'] = timeout_cli
|
||||
if timeout_mem:
|
||||
request['timeout_member_data'] = timeout_mem
|
||||
self.add_tags('listener', request)
|
||||
lbaas = clients.get_loadbalancer_client()
|
||||
response = lbaas.create_listener(**request)
|
||||
|
|
|
@ -158,6 +158,7 @@ class ServiceHandler(k8s_base.ResourceEventHandler):
|
|||
kubernetes.post('{}/{}/kuryrloadbalancers'.format(
|
||||
k_const.K8S_API_CRD_NAMESPACES, svc_namespace),
|
||||
loadbalancer_crd)
|
||||
LOG.debug("loadbalancer_crd: {}".format(loadbalancer_crd))
|
||||
except k_exc.K8sConflict:
|
||||
raise k_exc.ResourceNotReady(svc_name)
|
||||
except k_exc.K8sNamespaceTerminating:
|
||||
|
@ -182,6 +183,24 @@ class ServiceHandler(k8s_base.ResourceEventHandler):
|
|||
LOG.exception('Error updating kuryrnet CRD %s', loadbalancer_crd)
|
||||
raise
|
||||
|
||||
def _get_data_timeout_annotation(self, service):
|
||||
try:
|
||||
annotations = service['metadata']['annotations']
|
||||
except KeyError:
|
||||
return None, None
|
||||
try:
|
||||
timeout_cli = annotations[k_const.K8S_ANNOTATION_CLIENT_TIMEOUT]
|
||||
data_timeout_cli = int(timeout_cli)
|
||||
except KeyError:
|
||||
data_timeout_cli = None
|
||||
try:
|
||||
timeout_mem = annotations[k_const.K8S_ANNOTATION_MEMBER_TIMEOUT]
|
||||
data_timeout_mem = int(timeout_mem)
|
||||
except KeyError:
|
||||
data_timeout_mem = None
|
||||
LOG.debug("Data timeouts: {}, {}".format(timeout_cli, timeout_mem))
|
||||
return data_timeout_cli, data_timeout_mem
|
||||
|
||||
def _build_kuryrloadbalancer_spec(self, service):
|
||||
svc_ip = self._get_service_ip(service)
|
||||
spec_lb_ip = service['spec'].get('loadBalancerIP')
|
||||
|
@ -193,7 +212,6 @@ class ServiceHandler(k8s_base.ResourceEventHandler):
|
|||
sg_ids = self._drv_sg.get_security_groups(service, project_id)
|
||||
subnet_id = self._get_subnet_id(service, project_id, svc_ip)
|
||||
spec_type = service['spec'].get('type')
|
||||
|
||||
spec = {
|
||||
'ip': svc_ip,
|
||||
'ports': ports,
|
||||
|
@ -205,6 +223,9 @@ class ServiceHandler(k8s_base.ResourceEventHandler):
|
|||
|
||||
if spec_lb_ip is not None:
|
||||
spec['lb_ip'] = spec_lb_ip
|
||||
timeout_cli, timeout_mem = self._get_data_timeout_annotation(service)
|
||||
spec['timeout_client_data'] = timeout_cli
|
||||
spec['timeout_member_data'] = timeout_mem
|
||||
return spec
|
||||
|
||||
def _has_lbaas_spec_changes(self, service, loadbalancer_crd):
|
||||
|
|
|
@ -605,8 +605,13 @@ class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
|
|||
LOG.warning("Skipping listener creation as provider does"
|
||||
" not support %s protocol", protocol)
|
||||
continue
|
||||
lb = loadbalancer_crd['status'].get('loadbalancer')
|
||||
lb['timeout_client_data'] = loadbalancer_crd['spec'].get(
|
||||
'timeout_client_data')
|
||||
lb['timeout_member_data'] = loadbalancer_crd['spec'].get(
|
||||
'timeout_member_data')
|
||||
listener = self._drv_lbaas.ensure_listener(
|
||||
loadbalancer=loadbalancer_crd['status'].get('loadbalancer'),
|
||||
loadbalancer=lb,
|
||||
protocol=protocol,
|
||||
port=port,
|
||||
service_type=loadbalancer_crd['spec'].get('type'))
|
||||
|
|
|
@ -190,13 +190,15 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
|||
'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
|
||||
'ip': '1.2.3.4',
|
||||
'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
|
||||
'provider': 'amphora'
|
||||
'provider': 'amphora',
|
||||
'timeout_client_data': 50000,
|
||||
'timeout_member_data': 50000
|
||||
}
|
||||
# TODO(ivc): handle security groups
|
||||
m_driver._ensure_provisioned.return_value = expected_resp
|
||||
|
||||
resp = cls.ensure_listener(m_driver, loadbalancer,
|
||||
protocol, port)
|
||||
resp = cls.ensure_listener(
|
||||
m_driver, loadbalancer, protocol, port)
|
||||
|
||||
m_driver._ensure_provisioned.assert_called_once_with(
|
||||
loadbalancer, mock.ANY, m_driver._create_listener,
|
||||
|
@ -222,7 +224,9 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
|||
'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
|
||||
'ip': '1.2.3.4',
|
||||
'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
|
||||
'provider': 'amphora'
|
||||
'provider': 'amphora',
|
||||
'timeout_client_data': 50000,
|
||||
'timeout_member_data': 50000
|
||||
}
|
||||
m_driver._ensure_provisioned.side_effect = os_exc.BadRequestException
|
||||
|
||||
|
@ -560,7 +564,10 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
|||
'project_id': 'TEST_PROJECT',
|
||||
'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
|
||||
'protocol': 'TCP',
|
||||
'port': 1234
|
||||
'port': 1234,
|
||||
'timeout_client_data': 0,
|
||||
'timeout_member_data': 0
|
||||
|
||||
}
|
||||
listener_id = 'A57B7771-6050-4CA8-A63C-443493EC98AB'
|
||||
|
||||
|
@ -578,6 +585,36 @@ class TestLBaaSv2Driver(test_base.TestCase):
|
|||
self.assertEqual(listener, ret)
|
||||
self.assertEqual(listener_id, ret['id'])
|
||||
|
||||
def test_create_listener_with_different_timeouts(self):
    """Listener creation forwards only the non-zero timeout values.

    A listener spec with timeout_client_data=75000 and
    timeout_member_data=0 must result in a create_listener request that
    carries timeout_client_data but omits timeout_member_data (0 means
    "use the Octavia default").
    """
    cls = d_lbaasv2.LBaaSv2Driver
    m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
    lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
    listener = {
        'name': 'TEST_NAME',
        'project_id': 'TEST_PROJECT',
        'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
        'protocol': 'TCP',
        'port': 5678,
        'timeout_client_data': 75000,
        'timeout_member_data': 0
    }
    listener_id = 'A57B7771-6050-4CA8-A63C-443493EC98AB'

    # Expected request: note timeout_member_data is absent because its
    # value in the spec above is falsy (0).
    req = {
        'name': listener['name'],
        'project_id': listener['project_id'],
        'loadbalancer_id': listener['loadbalancer_id'],
        'protocol': listener['protocol'],
        'protocol_port': listener['port'],
        'timeout_client_data': listener['timeout_client_data']}
    resp = o_lis.Listener(id=listener_id)
    lbaas.create_listener.return_value = resp

    ret = cls._create_listener(m_driver, listener)
    lbaas.create_listener.assert_called_once_with(**req)
    self.assertEqual(listener, ret)
    self.assertEqual(listener_id, ret['id'])
|
||||
|
||||
def test_find_listener(self):
|
||||
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
|
||||
cls = d_lbaasv2.LBaaSv2Driver
|
||||
|
|
Loading…
Reference in New Issue