Add label core affinity labels to each vault pod
This commit adds support for core affinity labels for
vault. The label 'app.starlingx.io/component' tells
k8s whether to run the application pods on 'platform'
or 'application' cores.
The default value for 'app.starlingx.io/component' label
is 'platform', but the label accepts the values
'application' and 'platform'. The override has to be
performed when vault is in the uploaded state, after
application remove or before the first apply. This
behavior is required to ensure that no vault pod is
restarted in an improper manner.
Test plan:
PASS: In a AIO-SX system upload and apply the vault app. When apply
is finished, run "kubectl -n vault describe po sva | grep
platform" and the output should be three instances of
"app.starlingx.io/component=platform", indicating that the
default configuration is applied for each pod.
PASS: In a AIO-SX, where the vault app is in the applied state, run
"system application-remove vault" and override
'app.starlingx.io/component' label with 'application' value by
helm api. After the override, apply vault and verify
'app.starlingx.io/component' label is 'application' on the
pods describe output, similar to the previous test.
PASS: In a AIO-SX, where the vault app is in the applied state, run
"system application-remove vault" and override
'app.starlingx.io/component' label with any value other
than 'platform' or 'application' and after the apply check if
the default value of 'platform' was used for the pod labels.
PASS: In a Standard configuration with one worker node, upload and
apply the vault app. When apply is finished, run 'kubectl -n
vault describe po sva | grep -b3 "app.starlingx.io/component"'
and check the output for the 'app.starlingx.io/component'
label is the default value of 'platform' for each pod, with
every vault server pod having the label.
PASS: In a Standard configuration with one worker node, remove vault
and override 'app.starlingx.io/component' label with any value,
valid or not, and after the override, apply vault. With vault
in the applied state, verify the replica count override is kept
and check the pods in a similar way to the previous test to
validate that the HA configuration is maintained. The number
of pods replicas should reflect the configuration.
Story: 2010612
Task: 48252
Change-Id: If729ab8bb8fecddf54824f5aa59326960b66942a
Signed-off-by: Alan Bandeira <Alan.PortelaBandeira@windriver.com>
This commit is contained in:
#
# Copyright (c) 2020-2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# Helm: Supported charts:
# These values match the names in the chart package's Chart.yaml
HELM_APP_VAULT = 'vault'
HELM_CHART_VAULT = 'vault'
HELM_CHART_NS_VAULT = 'vault'

# Names of the vault pods; used as keys for the per-pod helm
# override sections (server/manager/injector).
HELM_VAULT_SERVER_POD = 'server'
HELM_VAULT_MANAGER_POD = 'manager'
HELM_VAULT_INJECTOR_POD = 'injector'

# Core-affinity label consumed by StarlingX/k8s to decide whether the
# pods run on 'platform' or 'application' cores.
HELM_CHART_COMPONENT_LABEL = 'app.starlingx.io/component'
@@ -1,31 +1,46 @@
|
||||
#
|
||||
# Copyright (c) 2020 Wind River Systems, Inc.
|
||||
# Copyright (c) 2020-2023 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from k8sapp_vault.common import constants as app_constants
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
|
||||
from sysinv.helm import base
|
||||
from sysinv.helm import common
|
||||
|
||||
from sysinv.db import api as dbapi
|
||||
|
||||
import yaml
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VaultHelm(base.BaseHelm):
    """Class to encapsulate helm operations for the vault chart"""

    class PodNames():
        """Class containing the default pod names"""
        # Keys of the per-pod override sections in the chart values.
        SERVER = app_constants.HELM_VAULT_SERVER_POD
        MANAGER = app_constants.HELM_VAULT_MANAGER_POD
        INJECTOR = app_constants.HELM_VAULT_INJECTOR_POD

    # Namespaces this plugin may generate overrides for.
    SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
        [common.HELM_NS_VAULT]

    SUPPORTED_APP_NAMESPACES = {
        constants.HELM_APP_VAULT:
            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_VAULT],
    }

    CHART = app_constants.HELM_CHART_VAULT

    SERVICE_NAME = 'vault'

    # Valid user override values for the core-affinity label.
    SUPPORTED_COMPONENT_OVERRIDES = ['application','platform']
    # Value used when the user override is absent or invalid.
    DEFAULT_AFFINITY = 'platform'
    # Helm values key under which extra pod labels are injected.
    LABEL_PARAMETER = 'extraLabels'
||||
def get_namespaces(self):
    """Return the list of namespaces supported by the vault chart."""
    supported = self.SUPPORTED_NAMESPACES
    return supported
|
||||
@@ -35,28 +50,59 @@ class VaultHelm(base.BaseHelm):
|
||||
worker=len(self.dbapi.ihost_get_by_personality(constants.WORKER))
|
||||
return controller+worker
|
||||
|
||||
|
||||
def get_overrides(self, namespace=None):
|
||||
|
||||
if self.get_master_worker_host_count() >= 3:
|
||||
overrides = {
|
||||
common.HELM_NS_VAULT: {
|
||||
'server': {
|
||||
'ha': {
|
||||
'replicas': 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
ha_replicas = 3
|
||||
else:
|
||||
overrides = {
|
||||
common.HELM_NS_VAULT: {
|
||||
'server': {
|
||||
'ha': {
|
||||
'replicas': 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
ha_replicas = 1
|
||||
|
||||
dbapi_instance = dbapi.get_instance()
|
||||
|
||||
db_app = dbapi_instance.kube_app_get(app_constants.HELM_APP_VAULT)
|
||||
|
||||
# User chart overrides
|
||||
new_chart_overrides = self._get_helm_overrides(
|
||||
dbapi_instance,
|
||||
db_app,
|
||||
app_constants.HELM_CHART_VAULT,
|
||||
app_constants.HELM_CHART_NS_VAULT,
|
||||
'user_overrides')
|
||||
|
||||
user_chosen_affinity = new_chart_overrides.get(
|
||||
app_constants.HELM_CHART_COMPONENT_LABEL) \
|
||||
if new_chart_overrides else None
|
||||
|
||||
if user_chosen_affinity in self.SUPPORTED_COMPONENT_OVERRIDES:
|
||||
affinity = user_chosen_affinity
|
||||
else:
|
||||
affinity = self.DEFAULT_AFFINITY
|
||||
LOG.warn((f'User override for core affinity {user_chosen_affinity} '
|
||||
f'is invalid, using default of {self.DEFAULT_AFFINITY}'))
|
||||
|
||||
overrides = {
|
||||
common.HELM_NS_VAULT: {
|
||||
self.PodNames.SERVER: {
|
||||
'ha': {
|
||||
'replicas': ha_replicas,
|
||||
},
|
||||
self.LABEL_PARAMETER: {
|
||||
app_constants.HELM_CHART_COMPONENT_LABEL: affinity
|
||||
}
|
||||
},
|
||||
self.PodNames.INJECTOR: {
|
||||
self.LABEL_PARAMETER: {
|
||||
app_constants.HELM_CHART_COMPONENT_LABEL: affinity
|
||||
}
|
||||
},
|
||||
self.PodNames.MANAGER: {
|
||||
self.LABEL_PARAMETER: {
|
||||
app_constants.HELM_CHART_COMPONENT_LABEL: affinity
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if namespace in self.SUPPORTED_NAMESPACES:
|
||||
return overrides[namespace]
|
||||
elif namespace:
|
||||
@@ -64,3 +110,21 @@ class VaultHelm(base.BaseHelm):
|
||||
namespace=namespace)
|
||||
else:
|
||||
return overrides
|
||||
|
||||
@staticmethod
|
||||
def _get_helm_overrides(dbapi_instance, app, chart, namespace,
|
||||
type_of_overrides):
|
||||
"""Helper function for querying helm overrides from db."""
|
||||
helm_overrides = {}
|
||||
try:
|
||||
helm_overrides = dbapi_instance.helm_override_get(
|
||||
app_id=app.id,
|
||||
name=chart,
|
||||
namespace=namespace,
|
||||
)[type_of_overrides]
|
||||
|
||||
if type(helm_overrides) == str:
|
||||
helm_overrides = yaml.safe_load(helm_overrides)
|
||||
except exception.HelmOverrideNotFound:
|
||||
LOG.debug("Overrides for this chart not found, nothing to be done.")
|
||||
return helm_overrides
|
||||
|
||||
@@ -338,6 +338,9 @@ spec:
|
||||
app.kubernetes.io/name: {{ template "vault.name" . }}-manager
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
component: webhook
|
||||
{{- if .Values.manager.extraLabels }}
|
||||
{{- toYaml .Values.manager.extraLabels | nindent 8 -}}
|
||||
{{- end }}
|
||||
spec:
|
||||
serviceAccountName: "{{ template "vault.fullname" . }}"
|
||||
{{- if .Values.global.imagePullSecrets }}
|
||||
|
||||
Reference in New Issue
Block a user