
ng-12: Label nodegroup nodes

With this change, each node is labeled through the following kubelet arguments (see the verification sketch below):
* --node-labels=magnum.openstack.org/role=${NODEGROUP_ROLE}
* --node-labels=magnum.openstack.org/nodegroup=${NODEGROUP_NAME}

Change-Id: Ic410a059b19a1252cdf6eed786964c5c7b03d01c
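
A quick way to verify the result on a running cluster (a sketch, not part of the patch; the nodegroup name below is only an example):

# Show the new labels as columns, then select the nodes of one example nodegroup.
kubectl get nodes -L magnum.openstack.org/role -L magnum.openstack.org/nodegroup
kubectl get nodes -l magnum.openstack.org/nodegroup=default-worker
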
tags/10.0.0.0rc1
Theodoros Tsioutsias 1 year ago
commit 113fdc44b2
14 changed files with 110 additions and 0 deletions
  1. magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh (+2, -0)
  2. magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh (+2, -0)
  3. magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.sh (+2, -0)
  4. magnum/drivers/common/templates/kubernetes/fragments/write-heat-params.sh (+2, -0)
  5. magnum/drivers/heat/k8s_template_def.py (+2, -0)
  6. magnum/drivers/k8s_coreos_v1/templates/kubecluster.yaml (+10, -0)
  7. magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml (+14, -0)
  8. magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml (+10, -0)
  9. magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml (+10, -0)
  10. magnum/drivers/k8s_fedora_coreos_v1/templates/kubecluster.yaml (+14, -0)
  11. magnum/drivers/k8s_fedora_coreos_v1/templates/kubemaster.yaml (+10, -0)
  12. magnum/drivers/k8s_fedora_coreos_v1/templates/kubeminion.yaml (+10, -0)
  13. magnum/drivers/k8s_fedora_ironic_v1/templates/kubecluster.yaml (+10, -0)
  14. magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py (+12, -0)

magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh (+2, -0)

@@ -396,6 +396,8 @@ fi

KUBELET_ARGS="${KUBELET_ARGS} --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
KUBELET_ARGS="${KUBELET_ARGS} --register-with-taints=CriticalAddonsOnly=True:NoSchedule,dedicated=master:NoSchedule"
KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/role=${NODEGROUP_ROLE}"
KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/nodegroup=${NODEGROUP_NAME}"

KUBELET_KUBECONFIG=/etc/kubernetes/kubelet-config.yaml
cat << EOF >> ${KUBELET_KUBECONFIG}
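
The two new lines simply append one kubelet flag per label; a minimal stand-alone sketch of the expansion, with example values in place of the ones normally read from the heat-params file:

# Example values only; on a real node these come from /etc/sysconfig/heat-params.
NODEGROUP_ROLE=master
NODEGROUP_NAME=default-master
KUBELET_ARGS=""
KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/role=${NODEGROUP_ROLE}"
KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/nodegroup=${NODEGROUP_NAME}"
echo ${KUBELET_ARGS}   # unquoted on purpose, to collapse the leading space
# --node-labels=magnum.openstack.org/role=master --node-labels=magnum.openstack.org/nodegroup=default-master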


magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh (+2, -0)

@@ -224,6 +224,8 @@ KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests --kubeconfig ${KUBEL
KUBELET_ARGS="${KUBELET_ARGS} --address=${KUBE_NODE_IP} --port=10250 --read-only-port=0 --anonymous-auth=false --authorization-mode=Webhook --authentication-token-webhook=true"
KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}"
KUBELET_ARGS="${KUBELET_ARGS} --volume-plugin-dir=/var/lib/kubelet/volumeplugins"
KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/role=${NODEGROUP_ROLE}"
KUBELET_ARGS="${KUBELET_ARGS} --node-labels=magnum.openstack.org/nodegroup=${NODEGROUP_NAME}"
KUBELET_ARGS="${KUBELET_ARGS} ${KUBELET_OPTIONS}"

if [ "$(echo "${CLOUD_PROVIDER_ENABLED}" | tr '[:upper:]' '[:lower:]')" = "true" ]; then


magnum/drivers/common/templates/kubernetes/fragments/write-heat-params-master.sh (+2, -0)

@@ -106,6 +106,8 @@ HEAT_PARAMS=/etc/sysconfig/heat-params
MIN_NODE_COUNT="$MIN_NODE_COUNT"
MAX_NODE_COUNT="$MAX_NODE_COUNT"
NPD_ENABLED="$NPD_ENABLED"
NODEGROUP_ROLE="$NODEGROUP_ROLE"
NODEGROUP_NAME="$NODEGROUP_NAME"
EOF
}
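
On a booted node these two entries land in /etc/sysconfig/heat-params, so the wiring can be checked in place (the values shown are illustrative):

grep NODEGROUP /etc/sysconfig/heat-params
# NODEGROUP_ROLE="master"
# NODEGROUP_NAME="default-master"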



magnum/drivers/common/templates/kubernetes/fragments/write-heat-params.sh (+2, -0)

@@ -58,6 +58,8 @@ OCTAVIA_ENABLED="$OCTAVIA_ENABLED"
HEAT_CONTAINER_AGENT_TAG="$HEAT_CONTAINER_AGENT_TAG"
AUTO_HEALING_ENABLED="$AUTO_HEALING_ENABLED"
AUTO_HEALING_CONTROLLER="$AUTO_HEALING_CONTROLLER"
NODEGROUP_ROLE="$NODEGROUP_ROLE"
NODEGROUP_NAME="$NODEGROUP_NAME"
EOF
}



magnum/drivers/heat/k8s_template_def.py (+2, -0)

@@ -127,12 +127,14 @@ class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
            'master_flavor': 'flavor_id',
            'master_image': 'image_id',
            'master_role': 'role',
            'master_nodegroup_name': 'name',
        })
        worker_params.update({
            'number_of_minions': 'node_count',
            'minion_flavor': 'flavor_id',
            'minion_image': 'image_id',
            'worker_role': 'role',
            'worker_nodegroup_name': 'name',
        })
        return super(
            K8sTemplateDefinition, self).get_nodegroup_param_maps(
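
These param maps are the glue between nodegroups and the templates: each nodegroup's name and role attributes are exposed to Heat as the *_role and *_nodegroup_name parameters consumed by the templates below. A hedged end-to-end sketch, assuming the nodegroup CLI of the same release series (the cluster and nodegroup names are examples):

# Hypothetical example: a new worker nodegroup should come up with matching labels.
openstack coe nodegroup create --role test-role --node-count 1 k8s-cluster test-ng
kubectl get nodes -l magnum.openstack.org/nodegroup=test-ng -L magnum.openstack.org/role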


magnum/drivers/k8s_coreos_v1/templates/kubecluster.yaml (+10, -0)

@@ -66,6 +66,16 @@ parameters:
    default: m1.small
    description: flavor to use when booting the server for minions

  master_nodegroup_name:
    type: string
    default: ""
    description: the name of the nodegroup where the node belongs

  worker_nodegroup_name:
    type: string
    default: ""
    description: the name of the nodegroup where the node belongs

  prometheus_monitoring:
    type: boolean
    default: false


magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml (+14, -0)

@@ -107,6 +107,16 @@ parameters:
    default: m1.small
    description: flavor to use when booting the server for master nodes

  master_nodegroup_name:
    type: string
    default: ""
    description: the name of the nodegroup where the node belongs

  worker_nodegroup_name:
    type: string
    default: ""
    description: the name of the nodegroup where the node belongs

  minion_flavor:
    type: string
    default: m1.small
@@ -950,6 +960,8 @@ resources:
list_join:
- '-'
- [{ get_param: 'OS::stack_name' }, 'master', '%index%']
nodegroup_role: {get_param: master_role}
nodegroup_name: {get_param: master_nodegroup_name}
prometheus_monitoring: {get_param: prometheus_monitoring}
grafana_admin_passwd: {get_param: grafana_admin_passwd}
api_public_address: {get_attr: [api_lb, floating_address]}
@@ -1137,6 +1149,8 @@ resources:
- '-'
- [{ get_param: 'OS::stack_name' }, 'node', '%index%']
prometheus_monitoring: {get_param: prometheus_monitoring}
nodegroup_role: {get_param: worker_role}
nodegroup_name: {get_param: worker_nodegroup_name}
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: minion_image}
minion_flavor: {get_param: minion_flavor}


magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml (+10, -0)

@@ -19,6 +19,14 @@ parameters:
    type: string
    description: flavor to use when booting the server

  nodegroup_role:
    type: string
    description: the role of the nodegroup

  nodegroup_name:
    type: string
    description: the name of the nodegroup where the node belongs

  ssh_key_name:
    type: string
    description: name of ssh key to be provisioned on our server
@@ -680,6 +688,8 @@ resources:
"$MIN_NODE_COUNT": {get_param: min_node_count}
"$MAX_NODE_COUNT": {get_param: max_node_count}
"$NPD_ENABLED": {get_param: npd_enabled}
"$NODEGROUP_ROLE": {get_param: nodegroup_role}
"$NODEGROUP_NAME": {get_param: nodegroup_name}
- get_file: ../../common/templates/kubernetes/fragments/make-cert.sh
- get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh
- get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh


magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml (+10, -0)

@@ -19,6 +19,14 @@ parameters:
    type: string
    description: flavor to use when booting the server

  nodegroup_role:
    type: string
    description: the role of the nodegroup

  nodegroup_name:
    type: string
    description: the name of the nodegroup where the node belongs

  ssh_key_name:
    type: string
    description: name of ssh key to be provisioned on our server
@@ -401,6 +409,8 @@ resources:
$AUTO_HEALING_ENABLED: {get_param: auto_healing_enabled}
$AUTO_HEALING_CONTROLLER: {get_param: auto_healing_controller}
$NPD_ENABLED: {get_param: npd_enabled}
$NODEGROUP_ROLE: {get_param: nodegroup_role}
$NODEGROUP_NAME: {get_param: nodegroup_name}
- get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh
- get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh
- get_file: ../../common/templates/fragments/configure-docker-registry.sh


magnum/drivers/k8s_fedora_coreos_v1/templates/kubecluster.yaml (+14, -0)

@@ -107,6 +107,16 @@ parameters:
    default: m1.small
    description: flavor to use when booting the server for master nodes

  master_nodegroup_name:
    type: string
    default: ""
    description: the name of the nodegroup where the node belongs

  worker_nodegroup_name:
    type: string
    default: ""
    description: the name of the nodegroup where the node belongs

  minion_flavor:
    type: string
    default: m1.small
@@ -950,6 +960,8 @@ resources:
list_join:
- '-'
- [{ get_param: 'OS::stack_name' }, 'master', '%index%']
nodegroup_role: {get_param: master_role}
nodegroup_name: {get_param: master_nodegroup_name}
prometheus_monitoring: {get_param: prometheus_monitoring}
grafana_admin_passwd: {get_param: grafana_admin_passwd}
api_public_address: {get_attr: [api_lb, floating_address]}
@@ -1138,6 +1150,8 @@ resources:
- '-'
- [{ get_param: 'OS::stack_name' }, 'node', '%index%']
prometheus_monitoring: {get_param: prometheus_monitoring}
nodegroup_role: {get_param: worker_role}
nodegroup_name: {get_param: worker_nodegroup_name}
ssh_key_name: {get_param: ssh_key_name}
ssh_public_key: {get_param: ssh_public_key}
server_image: {get_param: minion_image}


magnum/drivers/k8s_fedora_coreos_v1/templates/kubemaster.yaml (+10, -0)

@@ -19,6 +19,14 @@ parameters:
    type: string
    description: flavor to use when booting the server

  nodegroup_role:
    type: string
    description: the role of the nodegroup

  nodegroup_name:
    type: string
    description: the name of the nodegroup where the node belongs

  ssh_key_name:
    type: string
    description: name of ssh key to be provisioned on our server
@@ -680,6 +688,8 @@ resources:
"$MIN_NODE_COUNT": {get_param: min_node_count}
"$MAX_NODE_COUNT": {get_param: max_node_count}
"$NPD_ENABLED": {get_param: npd_enabled}
"$NODEGROUP_ROLE": {get_param: nodegroup_role}
"$NODEGROUP_NAME": {get_param: nodegroup_name}
- get_file: ../../common/templates/kubernetes/fragments/make-cert.sh
- get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh
- get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh


magnum/drivers/k8s_fedora_coreos_v1/templates/kubeminion.yaml (+10, -0)

@@ -19,6 +19,14 @@ parameters:
    type: string
    description: flavor to use when booting the server

  nodegroup_role:
    type: string
    description: the role of the nodegroup

  nodegroup_name:
    type: string
    description: the name of the nodegroup where the node belongs

  ssh_key_name:
    type: string
    description: name of ssh key to be provisioned on our server
@@ -401,6 +409,8 @@ resources:
$AUTO_HEALING_ENABLED: {get_param: auto_healing_enabled}
$AUTO_HEALING_CONTROLLER: {get_param: auto_healing_controller}
$NPD_ENABLED: {get_param: npd_enabled}
$NODEGROUP_ROLE: {get_param: nodegroup_role}
$NODEGROUP_NAME: {get_param: nodegroup_name}
- get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh
- get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh
- get_file: ../../common/templates/fragments/configure-docker-registry.sh


magnum/drivers/k8s_fedora_ironic_v1/templates/kubecluster.yaml (+10, -0)

@@ -65,6 +65,16 @@ parameters:
    default: baremetal
    description: flavor to use when booting the server

  master_nodegroup_name:
    type: string
    default: ""
    description: the name of the nodegroup where the node belongs

  worker_nodegroup_name:
    type: string
    default: ""
    description: the name of the nodegroup where the node belongs

  prometheus_monitoring:
    type: boolean
    default: false


magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py (+12, -0)

@@ -371,6 +371,8 @@ class TestClusterConductorWithK8s(base.TestCase):
            'etcd_volume_type': 'lvmdriver-1',
            'master_role': 'master',
            'worker_role': 'worker',
            'master_nodegroup_name': 'master_ng',
            'worker_nodegroup_name': 'worker_ng',
        }

        if missing_attr is not None:
@@ -520,6 +522,8 @@ class TestClusterConductorWithK8s(base.TestCase):
            'etcd_volume_type': 'lvmdriver-1',
            'master_role': 'master',
            'worker_role': 'worker',
            'master_nodegroup_name': 'master_ng',
            'worker_nodegroup_name': 'worker_ng',
        }

        self.assertEqual(expected, definition)
@@ -651,6 +655,8 @@ class TestClusterConductorWithK8s(base.TestCase):
            'keystone_auth_default_policy': self.keystone_auth_default_policy,
            'master_role': 'master',
            'worker_role': 'worker',
            'master_nodegroup_name': 'master_ng',
            'worker_nodegroup_name': 'worker_ng',
        }
        self.assertEqual(expected, definition)
        self.assertEqual(
@@ -761,6 +767,8 @@ class TestClusterConductorWithK8s(base.TestCase):
            'portal_network_cidr': '10.254.0.0/16',
            'master_role': 'master',
            'worker_role': 'worker',
            'master_nodegroup_name': 'master_ng',
            'worker_nodegroup_name': 'worker_ng',
        }
        self.assertEqual(expected, definition)
        self.assertEqual(
@@ -869,6 +877,8 @@ class TestClusterConductorWithK8s(base.TestCase):
            'portal_network_cidr': '10.254.0.0/16',
            'master_role': 'master',
            'worker_role': 'worker',
            'master_nodegroup_name': 'master_ng',
            'worker_nodegroup_name': 'worker_ng',
        }
        self.assertEqual(expected, definition)
        self.assertEqual(
@@ -1100,6 +1110,8 @@ class TestClusterConductorWithK8s(base.TestCase):
            'etcd_volume_type': 'lvmdriver-1',
            'master_role': 'master',
            'worker_role': 'worker',
            'master_nodegroup_name': 'master_ng',
            'worker_nodegroup_name': 'worker_ng',
        }
        self.assertEqual(expected, definition)
        self.assertEqual(
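
The updated expectations can be exercised locally with the usual OpenStack tox/stestr workflow (a sketch; the tox environment name varies by branch, e.g. py3, py36 or py37):

tox -e py3 -- magnum.tests.unit.conductor.handlers.test_k8s_cluster_conductor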

