Fix and gate linting
This fixes existing linting issues. It also adds linting to `make test` so that it is included in gating, and to ease the dev workflow: developers can just run `make test` and don't have to remember to also run `make lint`, which runs quickly anyway.

Signed-off-by: Sean Eagan <seaneagan1@gmail.com>
Change-Id: I3b2eec9304a769f399018acd7d2486e22894b519
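The `lint` target that `test` now depends on is not part of this diff. For orientation, a minimal sketch of what such a Makefile target commonly looks like in a Go project, assuming golangci-lint is the linter in use (the actual linter and its flags are not visible in this change):

# Sketch only: assumes golangci-lint; the repo's real lint target is defined elsewhere in the Makefile.
lint:
	golangci-lint run ./...

With the dependency added below, any lint failure also fails `make test`, and therefore gating.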
Makefile

@@ -38,7 +38,7 @@ kubernetes:
 all: manager

 # Run tests
-test: generate fmt vet manifests
+test: generate fmt vet manifests lint
 	go test ./... -coverprofile cover.out

 # Build manager binary
@@ -263,8 +263,8 @@ func removeString(slice []string, s string) []string {
 */

 // machines
-func (r *SIPClusterReconciler) gatherVBMH(ctx context.Context, sip airshipv1.SIPCluster) (*airshipvms.MachineList,
-	error) {
+func (r *SIPClusterReconciler) gatherVBMH(ctx context.Context, sip airshipv1.SIPCluster) (
+	*airshipvms.MachineList, error) {
 	// 1- Let me retrieve all BMH that are unlabeled or already labeled with the target Tenant/CNF
 	// 2- Let me now select the one's that meet the scheduling criteria
 	// If I schedule successfully then
@@ -297,7 +297,8 @@ func (r *SIPClusterReconciler) gatherVBMH(ctx context.Context, sip airshipv1.SIP
 	return machines, nil
 }

-func (r *SIPClusterReconciler) deployInfra(sip airshipv1.SIPCluster, machines *airshipvms.MachineList, logger logr.Logger) error {
+func (r *SIPClusterReconciler) deployInfra(sip airshipv1.SIPCluster, machines *airshipvms.MachineList,
+	logger logr.Logger) error {
 	if err := airshipsvc.CreateNS(sip.Spec.ClusterName, r.Client); err != nil {
 		return err
 	}
@@ -16,12 +16,14 @@ package services

 import (
 	"bytes"
+	"github.com/go-logr/logr"
+
 	"html/template"
+	"k8s.io/apimachinery/pkg/types"
 	airshipv1 "sipcluster/pkg/api/v1"
 	airshipvms "sipcluster/pkg/vbmh"

-	"github.com/go-logr/logr"
-	"k8s.io/apimachinery/pkg/types"
+
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -29,6 +31,7 @@ import (

 const (
 	// ConfigSecretName name of the haproxy config secret name/volume/mount
+	/* #nosec */
 	ConfigSecretName = "haproxy-config"
 	// DefaultBalancerImage is the image that will be used as load balancer
 	DefaultBalancerImage = "haproxy:2.3.2"
@@ -60,12 +63,10 @@ func (lb loadBalancer) Deploy() error {
 		return err
 	}

-	lbService, err := lb.generateService()
-	if err != nil {
-		return err
-	}
+	lbService := lb.generateService()
 	lb.logger.Info("Applying loadbalancer service", "service", lbService.GetNamespace()+"/"+lbService.GetName())
-	err = applyRuntimeObject(client.ObjectKey{Name: lbService.GetName(), Namespace: lbService.GetNamespace()}, lbService, lb.client)
+	err = applyRuntimeObject(client.ObjectKey{Name: lbService.GetName(), Namespace: lbService.GetNamespace()},
+		lbService, lb.client)
 	if err != nil {
 		return err
 	}
@@ -154,7 +155,7 @@ func (lb loadBalancer) generateSecret() (*corev1.Secret, error) {
 	}, nil
 }

-func (lb loadBalancer) generateService() (*corev1.Service, error) {
+func (lb loadBalancer) generateService() *corev1.Service {
 	return &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: lb.sipName.Name + "-load-balancer-service",
@@ -171,7 +172,7 @@ func (lb loadBalancer) generateService() (*corev1.Service, error) {
 			Selector: map[string]string{"lb-name": lb.sipName.Namespace + "-haproxy"},
 			Type:     corev1.ServiceTypeNodePort,
 		},
-	}, nil
+	}
 }

 type proxy struct {