Support for configurable HAProxy LoadBalancer

- Moved the HAProxy control plane/worker templates into
  ConfigMaps

Relates-To: #19
Closes: #19
Change-Id: I9d2a3992827fa3a589c930e40ab3ab9d34527731
Manoj Alva (ma257n) 2021-07-02 06:28:03 +00:00, committed by Manoj
parent a938b1eec6
commit 07b94538c0
8 changed files with 173 additions and 96 deletions

config/manager/kustomization.yaml

@ -1,2 +1,9 @@
resources:
- manager.yaml
+- loadbalancer
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+images:
+- name: controller
+  newName: quay.io/airshipit/sip
+  newTag: latest

config/manager/loadbalancer/kustomization.yaml

@ -0,0 +1,12 @@
configMapGenerator:
- name: loadbalancercontrolplane
  files:
  - loadBalancerControlPlane.cfg
- name: loadbalancerworker
  files:
  - loadBalancerWorker.cfg
generatorOptions:
  disableNameSuffixHash: true
namespace: sipcluster-system
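Because disableNameSuffixHash: true stops kustomize from appending a content hash, the generated ConfigMaps keep the stable names loadbalancercontrolplane and loadbalancerworker, and the controller can fetch them by fixed name at runtime. A minimal sketch of that lookup with a controller-runtime client (the fetchTemplate helper, its signature, and the hard-coded namespace are illustrative, not code from this commit):

package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// fetchTemplate reads an HAProxy template out of one of the generated
// ConfigMaps. The fixed-name lookup only works because
// disableNameSuffixHash keeps kustomize from appending a content hash.
func fetchTemplate(ctx context.Context, cl client.Client, name, key string) (string, error) {
	cm := &corev1.ConfigMap{}
	if err := cl.Get(ctx, client.ObjectKey{Name: name, Namespace: "sipcluster-system"}, cm); err != nil {
		return "", fmt.Errorf("retrieving template ConfigMap %q: %w", name, err)
	}
	tmpl, ok := cm.Data[key]
	if !ok {
		return "", fmt.Errorf("ConfigMap %q has no key %q", name, key)
	}
	return tmpl, nil
}

A caller would invoke it as, for example, fetchTemplate(ctx, cl, "loadbalancercontrolplane", "loadBalancerControlPlane.cfg").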

config/manager/loadbalancer/loadBalancerControlPlane.cfg

@ -0,0 +1,48 @@
global
    log stdout format raw local0 notice
    daemon
defaults
    mode http
    log global
    option httplog
    option dontlognull
    retries 1
    # Configures the timeout for a connection request to be left pending in a queue
    # (connection requests are queued once the maximum number of connections is reached).
    timeout queue 30s
    # Configures the timeout for a connection to a backend server to be established.
    timeout connect 30s
    # Configures the timeout for inactivity during periods when we would expect
    # the client to be speaking. For usability of kubectl exec, the timeout should
    # be long enough to cover inactivity due to idleness of interactive sessions.
    timeout client 600s
    # Configures the timeout for inactivity during periods when we would expect
    # the server to be speaking. For usability of kubectl log -f, the timeout should
    # be long enough to cover inactivity due to the lack of new logs.
    timeout server 600s
#---------------------------------------------------------------------
{{- $servers := .Servers }}
{{- range .ContainerPorts }}
{{- $containerPort := . }}
frontend {{ $containerPort.Name }}-frontend
    bind *:{{ $containerPort.ContainerPort }}
    mode tcp
    option tcplog
    default_backend {{ $containerPort.Name }}-backend
backend {{ $containerPort.Name }}-backend
    mode tcp
    balance roundrobin
    option httpchk GET /readyz
    http-check expect status 200
    option log-health-checks
    # Observed apiserver returns 500 for around 10s when 2nd cp node joins.
    # downinter 2s makes it check more frequently to recover from that state sooner.
    # Also changing fall to 4 so that it takes longer (4 failures) for it to take down a backend.
    default-server check check-ssl verify none inter 5s downinter 2s fall 4 on-marked-down shutdown-sessions
{{- range $servers }}
{{- $server := . }}
    server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
{{ end -}}
{{ end -}}
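The {{ ... }} markers in the file above are Go text/template actions: the outer range emits one frontend/backend pair per container port, and the inner range emits one server line per host. A runnable sketch of that expansion (the server and containerPort types, the sample names and IPs, and the abbreviated template are hypothetical stand-ins, not the controller's actual data structures):

package main

import (
	"os"
	"text/template"
)

// Hypothetical shapes for the template data; the controller's real proxy
// struct may differ, but the template only dereferences these fields.
type server struct {
	Name string
	IP   string
}

type containerPort struct {
	Name          string
	ContainerPort int32
}

// An abbreviated stand-in for the template text served from the ConfigMap.
var cfg = `{{- $servers := .Servers }}
{{- range .ContainerPorts }}
{{- $containerPort := . }}
frontend {{ $containerPort.Name }}-frontend
    bind *:{{ $containerPort.ContainerPort }}
    default_backend {{ $containerPort.Name }}-backend
backend {{ $containerPort.Name }}-backend
{{- range $servers }}
{{- $server := . }}
    server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
{{ end -}}
{{ end -}}`

func main() {
	// One frontend/backend pair is emitted per port; one server line per host.
	tmpl := template.Must(template.New("haproxy").Parse(cfg))
	data := struct {
		Servers        []server
		ContainerPorts []containerPort
	}{
		Servers:        []server{{Name: "cp-0", IP: "10.0.0.10"}, {Name: "cp-1", IP: "10.0.0.11"}},
		ContainerPorts: []containerPort{{Name: "http", ContainerPort: 6443}},
	}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}

Running it prints a frontend bound to *:6443 and a backend with server lines for cp-0 and cp-1, which mirrors what the generateTemplate method further down in this diff produces from the full template.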

config/manager/loadbalancer/loadBalancerWorker.cfg

@ -0,0 +1,40 @@
global
    log stdout format raw local0 notice
    daemon
defaults
    mode tcp
    log global
    option tcplog
    option dontlognull
    retries 1
    # Configures the timeout for a connection request to be left pending in a queue
    # (connection requests are queued once the maximum number of connections is reached).
    timeout queue 30s
    # Configures the timeout for a connection to a backend server to be established.
    timeout connect 30s
    # Configures the timeout for inactivity during periods when we would expect
    # the client to be speaking.
    timeout client 600s
    # Configures the timeout for inactivity during periods when we would expect
    # the server to be speaking.
    timeout server 600s
#---------------------------------------------------------------------
{{- $servers := .Servers }}
{{- range .ContainerPorts }}
{{- $containerPort := . }}
frontend {{ $containerPort.Name }}-frontend
    bind *:{{ $containerPort.ContainerPort }}
    default_backend {{ $containerPort.Name }}-backend
backend {{ $containerPort.Name }}-backend
    balance roundrobin
    option tcp-check
    tcp-check connect
    option log-health-checks
    default-server check
{{- range $servers }}
{{- $server := . }}
    server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
{{ end -}}
{{ end -}}

View File

@ -58,4 +58,4 @@ spec:
          end: 30011
        nodeInterfaceId: oam-ipv4
        # NOTE: clusterIP has not yet been implemented.
        # clusterIP: 1.2.3.4 # IP of the base cluster VIP

View File

@ -16,6 +16,7 @@ package services
import (
	"bytes"
+	"context"
	"fmt"
	"strings"
@ -236,7 +237,7 @@ func newLBControlPlane(name, namespace string,
	logger logr.Logger,
	config airshipv1.LoadBalancerServiceControlPlane,
	machines *bmh.MachineList,
-	client client.Client) loadBalancerControlPlane {
+	mgrClient client.Client) loadBalancerControlPlane {
	servicePorts := []corev1.ServicePort{
		{
			Name: "http",
@ -244,6 +245,17 @@ func newLBControlPlane(name, namespace string,
			NodePort: int32(config.NodePort),
		},
	}
+	// Get the template string from the ConfigMap
+	templateControlPlane := ""
+	cm := &corev1.ConfigMap{}
+	err := mgrClient.Get(context.Background(), client.ObjectKey{
+		Name:      "loadbalancercontrolplane",
+		Namespace: namespace}, cm)
+	if err != nil {
+		logger.Error(err, "unable to retrieve the template ConfigMap")
+	}
+	templateControlPlane = cm.Data["loadBalancerControlPlane.cfg"]
	return loadBalancerControlPlane{loadBalancer{
		sipName: types.NamespacedName{
			Name: name,
@ -252,7 +264,7 @@ func newLBControlPlane(name, namespace string,
		logger: logger,
		config: config.SIPClusterService,
		machines: machines,
-		client: client,
+		client: mgrClient,
		bmhRole: airshipv1.RoleControlPlane,
		template: templateControlPlane,
		servicePorts: servicePorts,
@ -265,7 +277,7 @@ func newLBWorker(name, namespace string,
	logger logr.Logger,
	config airshipv1.LoadBalancerServiceWorker,
	machines *bmh.MachineList,
-	client client.Client) loadBalancerWorker {
+	mgrClient client.Client) loadBalancerWorker {
	servicePorts := []corev1.ServicePort{}
	for port := config.NodePortRange.Start; port <= config.NodePortRange.End; port++ {
		servicePorts = append(servicePorts, corev1.ServicePort{
@ -274,6 +286,18 @@ func newLBWorker(name, namespace string,
			NodePort: int32(port),
		})
	}
+	// Get the template string from the ConfigMap
+	templateWorker := ""
+	cm := &corev1.ConfigMap{}
+	err := mgrClient.Get(context.Background(), client.ObjectKey{
+		Name:      "loadbalancerworker",
+		Namespace: namespace}, cm)
+	if err != nil {
+		logger.Error(err, "unable to retrieve the template ConfigMap")
+	}
+	templateWorker = cm.Data["loadBalancerWorker.cfg"]
	return loadBalancerWorker{loadBalancer{
		sipName: types.NamespacedName{
			Name: name,
@ -282,7 +306,7 @@ func newLBWorker(name, namespace string,
		logger: logger,
		config: config.SIPClusterService,
		machines: machines,
-		client: client,
+		client: mgrClient,
		bmhRole: airshipv1.RoleWorker,
		template: templateWorker,
		servicePorts: servicePorts,
@ -310,93 +334,3 @@ func (lb loadBalancer) generateTemplate(p proxy) ([]byte, error) {
	rendered := w.Bytes()
	return rendered, nil
}
-var templateControlPlane = `global
-    log stdout format raw local0 notice
-    daemon
-defaults
-    mode http
-    log global
-    option httplog
-    option dontlognull
-    retries 1
-    # Configures the timeout for a connection request to be left pending in a queue
-    # (connection requests are queued once the maximum number of connections is reached).
-    timeout queue 30s
-    # Configures the timeout for a connection to a backend server to be established.
-    timeout connect 30s
-    # Configures the timeout for inactivity during periods when we would expect
-    # the client to be speaking. For usability of 'kubectl exec', the timeout should
-    # be long enough to cover inactivity due to idleness of interactive sessions.
-    timeout client 600s
-    # Configures the timeout for inactivity during periods when we would expect
-    # the server to be speaking. For usability of 'kubectl log -f', the timeout should
-    # be long enough to cover inactivity due to the lack of new logs.
-    timeout server 600s
-#---------------------------------------------------------------------
-{{- $servers := .Servers }}
-{{- range .ContainerPorts }}
-{{- $containerPort := . }}
-frontend {{ $containerPort.Name }}-frontend
-    bind *:{{ $containerPort.ContainerPort }}
-    mode tcp
-    option tcplog
-    default_backend {{ $containerPort.Name }}-backend
-backend {{ $containerPort.Name }}-backend
-    mode tcp
-    balance roundrobin
-    option httpchk GET /readyz
-    http-check expect status 200
-    option log-health-checks
-    # Observed apiserver returns 500 for around 10s when 2nd cp node joins.
-    # downinter 2s makes it check more frequently to recover from that state sooner.
-    # Also changing fall to 4 so that it takes longer (4 failures) for it to take down a backend.
-    default-server check check-ssl verify none inter 5s downinter 2s fall 4 on-marked-down shutdown-sessions
-{{- range $servers }}
-{{- $server := . }}
-    server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
-{{ end -}}
-{{ end -}}`
-var templateWorker = `global
-    log stdout format raw local0 notice
-    daemon
-defaults
-    mode tcp
-    log global
-    option tcplog
-    option dontlognull
-    retries 1
-    # Configures the timeout for a connection request to be left pending in a queue
-    # (connection requests are queued once the maximum number of connections is reached).
-    timeout queue 30s
-    # Configures the timeout for a connection to a backend server to be established.
-    timeout connect 30s
-    # Configures the timeout for inactivity during periods when we would expect
-    # the client to be speaking.
-    timeout client 600s
-    # Configures the timeout for inactivity during periods when we would expect
-    # the server to be speaking.
-    timeout server 600s
-#---------------------------------------------------------------------
-{{- $servers := .Servers }}
-{{- range .ContainerPorts }}
-{{- $containerPort := . }}
-frontend {{ $containerPort.Name }}-frontend
-    bind *:{{ $containerPort.ContainerPort }}
-    default_backend {{ $containerPort.Name }}-backend
-backend {{ $containerPort.Name }}-backend
-    balance roundrobin
-    option tcp-check
-    tcp-check connect
-    option log-health-checks
-    default-server check
-{{- range $servers }}
-{{- $server := . }}
-    server {{ $server.Name }} {{ $server.IP }}:{{ $containerPort.ContainerPort }}
-{{ end -}}
-{{ end -}}`

View File

@ -3,6 +3,7 @@ package services_test
import (
	"context"
	"encoding/json"
+	"io/ioutil"
	"strings"

	airshipv1 "sipcluster/pkg/api/v1"
@ -82,6 +83,23 @@ var _ = Describe("Service Set", func() {
				bmh2.GetName(): m2,
			},
		}
+		// Create ConfigMaps holding the HAProxy templates
+		TemplateControlPlane, err := ioutil.ReadFile("../../config/manager/loadbalancer/loadBalancerControlPlane.cfg")
+		if err == nil {
+			lbcontrolplaneTemplateConfigMap := testutil.CreateTemplateConfigMap("loadbalancercontrolplane",
+				"loadBalancerControlPlane.cfg", "default", string(TemplateControlPlane))
+			Expect(k8sClient.Create(context.Background(), lbcontrolplaneTemplateConfigMap)).Should(Succeed())
+		}
+		TemplateWorker, err := ioutil.ReadFile("../../config/manager/loadbalancer/loadBalancerWorker.cfg")
+		if err == nil {
+			lbworkerTemplateConfigMap := testutil.CreateTemplateConfigMap("loadbalancerworker",
+				"loadBalancerWorker.cfg", "default", string(TemplateWorker))
+			Expect(k8sClient.Create(context.Background(), lbworkerTemplateConfigMap)).Should(Succeed())
+		}
	})

	AfterEach(func() {
@ -89,6 +107,7 @@ var _ = Describe("Service Set", func() {
		Expect(k8sClient.DeleteAllOf(context.Background(), &metal3.BareMetalHost{}, opts...)).Should(Succeed())
		Expect(k8sClient.DeleteAllOf(context.Background(), &airshipv1.SIPCluster{}, opts...)).Should(Succeed())
		Expect(k8sClient.DeleteAllOf(context.Background(), &corev1.Secret{}, opts...)).Should(Succeed())
+		Expect(k8sClient.DeleteAllOf(context.Background(), &corev1.ConfigMap{}, opts...)).Should(Succeed())
	})

	Context("When new SIP cluster is created", func() {

View File

@ -380,7 +380,6 @@ func CreateSIPCluster(name string, namespace string, controlPlanes int, workers
				SIPClusterService: airshipv1.SIPClusterService{
					NodeInterface: "oam-ipv4",
				},
-				NodePort: 30001,
			},
		},
		LoadBalancerWorker: []airshipv1.LoadBalancerServiceWorker{
@ -429,6 +428,24 @@ func CreateBMCAuthSecret(nodeName string, namespace string, username string, pas
	}
}

+// CreateTemplateConfigMap creates a K8s ConfigMap that holds an HAProxy configuration template
+func CreateTemplateConfigMap(cmname string, templatename string, namespace string,
+	templatedata string) *corev1.ConfigMap {
+	return &corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: corev1.SchemeGroupVersion.String(),
+			Kind:       "ConfigMap",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      cmname,
+			Namespace: namespace,
+		},
+		Data: map[string]string{
+			templatename: templatedata,
+		},
+	}
+}

func CompareLabels(expected labels.Selector, actual map[string]string) error {
	if !expected.Matches(labels.Set(actual)) {
		return fmt.Errorf("labels do not match expected selector %v. Has labels %v", expected, actual)