Configure Fedora Atomic for Kubernetes load balancer feature

Kubernetes service provides the LoadBalancer feature

This is supported through an OpenStack plugin in Kubernetes code.
Enabling this feature requires configuring the cluster properly for
Kubernetes to interface with Neutron.  Kubernetes backend will then
create the Neutron load balancer pool, members, VIP and monitor,
and manage the pool members as pods are added/removed from the
service.

This patch updates the Fedora Atomic heat templates and scripts for
the following changes:

1. Update command line parameters for Kubernetes services.

2. Pass along OpenStack info and generate the configuration file
for Kubernetes to talk to OpenStack.

3. Adjust the name for minion and private network to match what
Kubernetes expects.

4. Let kubelet register the minion automatically instead of
registering manually.

With this patch, a Kubernetes service containing the attribute
"type: LoadBalancer" will have a load balancer created in Neutron.
The new image fedora-21-atomic-6.qcow2 needs to be used since it
contains required fixes for the kube-controller-manager.
The load balancer support will be disabled by default, the user
needs to log into the master node to enter the password to
enable the feature.  See the guide for details.

The templates for CoreOS and Ironic are in the following patches.

Partially-Implements: blueprint external-lb
Change-Id: I2c4b854f94fb2dfc99fc460c897f33bf59d260e3
changes/78/191878/11
Ton Ngo 7 years ago
parent f3e09133e9
commit 6d051efcc5
  1. 4
      magnum/conductor/template_definition.py
  2. 3
      magnum/templates/heat-kubernetes/fragments/configure-kubernetes-master.sh
  3. 9
      magnum/templates/heat-kubernetes/fragments/configure-kubernetes-minion.sh
  4. 32
      magnum/templates/heat-kubernetes/fragments/kube-register.yaml
  5. 5
      magnum/templates/heat-kubernetes/fragments/write-heat-params-master.yaml
  6. 22
      magnum/templates/heat-kubernetes/fragments/write-kube-os-config.sh
  7. 33
      magnum/templates/heat-kubernetes/kubecluster.yaml
  8. 28
      magnum/templates/heat-kubernetes/kubemaster.yaml
  9. 12
      magnum/templates/heat-kubernetes/kubeminion.yaml
  10. 19
      magnum/tests/unit/conductor/handlers/test_bay_conductor.py
  11. 9
      magnum/tests/unit/conductor/test_template_definition.py

@ -429,6 +429,10 @@ class AtomicK8sTemplateDefinition(BaseTemplateDefinition):
scale_mgr.get_removal_nodes(hosts))
extra_params['discovery_url'] = self.get_discovery_url(bay)
# Kubernetes backend code is still using v2 API
extra_params['auth_url'] = context.auth_url.replace("v3", "v2")
extra_params['username'] = context.user_name
extra_params['tenant_name'] = context.tenant
for label in label_list:
extra_params[label] = baymodel.labels.get(label)

@ -14,11 +14,12 @@ sed -i '
sed -i '
/^KUBE_API_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
/^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"|
/^KUBE_API_ARGS=/ s/=.*/="--runtime_config=api\/all=true"/
/^KUBE_API_ARGS=/ s/KUBE_API_ARGS./#Uncomment the following line to disable Load Balancer feature\nKUBE_API_ARGS="--runtime_config=api\/all=true"\n#Uncomment the following line to enable Load Balancer feature\n#KUBE_API_ARGS="--runtime_config=api\/all=true --cloud_config=\/etc\/sysconfig\/kube_openstack_config --cloud_provider=openstack"/
/^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd_servers=http:\/\/127.0.0.1:2379"/
/^KUBE_ADMISSION_CONTROL=/ s/=.*/=""/
' /etc/kubernetes/apiserver
sed -i '
/^KUBELET_ADDRESSES=/ s/=.*/="--machines='""'"/
/^KUBE_CONTROLLER_MANAGER_ARGS=/ s/KUBE_CONTROLLER_MANAGER_ARGS.*/#Uncomment the following line to enable Kubernetes Load Balancer feature \n#KUBE_CONTROLLER_MANAGER_ARGS="--cloud_config=\/etc\/sysconfig\/kube_openstack_config --cloud_provider=openstack"/
' /etc/kubernetes/controller-manager

@ -4,10 +4,6 @@
echo "configuring kubernetes (minion)"
myip=$(ip addr show eth0 |
awk '$1 == "inet" {print $2}' | cut -f1 -d/)
myip_last_octet=${myip##*.}
ETCD_SERVER_IP=${ETCD_SERVER_IP:-$KUBE_MASTER_IP}
sed -i '
@ -18,9 +14,8 @@ sed -i '
sed -i '
/^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
/^KUBELET_HOSTNAME=/ s/=.*/="--hostname_override='"$myip"'"/
/^KUBELET_HOSTNAME=/ s/=.*/=""/
/^KUBELET_API_SERVER=/ s|=.*|="--api_servers=http://'"$KUBE_MASTER_IP"':8080"|
' /etc/kubernetes/kubelet
sed -i '
@ -39,5 +34,5 @@ EOF
sed -i '/^DOCKER_STORAGE_OPTIONS=/ s/=.*/=--storage-driver devicemapper --storage-opt dm.fs=xfs --storage-opt dm.thinpooldev=\/dev\/mapper\/docker-docker--pool --storage-opt dm.use_deferred_removal=true/' /etc/sysconfig/docker-storage
systemctl enable kube-register
hostname `hostname | sed 's/.novalocal//'`

@ -9,8 +9,7 @@ write_files:
. /etc/sysconfig/heat-params
master_url=http://${KUBE_MASTER_IP}:8080
myip=$(ip addr show eth0 |
awk '$1 == "inet" {print $2}' | cut -f1 -d/)
nova_instance_name=$(hostname -s)
# wait for master api
until curl -o /dev/null -sf "${master_url}/healthz"; do
@ -19,41 +18,24 @@ write_files:
done
if [ "$1" = "-u" ]; then
echo "unregistering minion $myip"
kubectl -s ${master_url} delete node/$myip
echo "unregistering minion $nova_instance_name"
kubectl -s ${master_url} delete node/$nova_instance_name
else
echo "registering minion $myip"
echo "registering minion $nova_instance_name"
cpu=$(($(nproc) * 1000))
memory=$(awk '/MemTotal: /{print $2 * 1024}' /proc/meminfo)
cat <<EOF | kubectl create -s ${master_url} -f-
apiVersion: v1
id: $myip
id: $nova_instance_name
kind: Node
resources:
capacity:
cpu: $cpu
memory: $memory
metadata:
name: $myip
name: $nova_instance_name
spec:
externalID: $myip
externalID: $nova_instance_name
EOF
fi
- path: /etc/systemd/system/kube-register.service
permissions: "0644"
owner: root
content: |
[Unit]
Description=Register/unregister this node with the Kubernetes master
Requires=kubelet.service
After=kubelet.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/local/bin/kube-register
ExecStop=/usr/local/bin/kube-register -u
[Install]
WantedBy=kubelet.service

@ -12,3 +12,8 @@ write_files:
FLANNEL_USE_VXLAN="$FLANNEL_USE_VXLAN"
PORTAL_NETWORK_CIDR="$PORTAL_NETWORK_CIDR"
ETCD_DISCOVERY_URL="$ETCD_DISCOVERY_URL"
AUTH_URL="$AUTH_URL"
USERNAME="$USERNAME"
PASSWORD="$PASSWORD"
TENANT_NAME="$TENANT_NAME"
CLUSTER_SUBNET="$CLUSTER_SUBNET"

@ -0,0 +1,22 @@
#!/bin/sh
# Write the OpenStack cloud-provider configuration consumed by the
# Kubernetes services when the Load Balancer feature is enabled
# (the commented-out --cloud_config/--cloud_provider flags in the
# configure-kubernetes-*.sh fragments point at this file).
# Credentials and the cluster subnet id come from the heat-params
# file written at cluster creation time.
. /etc/sysconfig/heat-params
# Path matched by the --cloud_config flag in the apiserver and
# controller-manager sysconfig fragments.
KUBE_OS_CLOUD_CONFIG=/etc/sysconfig/kube_openstack_config
# Generate the configuration for Kubernetes services
# to talk to OpenStack Neutron
# NOTE(review): key casing is inconsistent (Username/Password vs
# auth-url/tenant-name) -- presumably the consumer parses keys
# case-insensitively; confirm against the Kubernetes OpenStack
# cloud provider's config parser before relying on it.
cat > $KUBE_OS_CLOUD_CONFIG <<EOF
[Global]
auth-url=$AUTH_URL
Username=$USERNAME
Password=$PASSWORD
tenant-name=$TENANT_NAME
[LoadBalancer]
subnet-id=$CLUSTER_SUBNET
create-monitor=yes
monitor-delay=1m
monitor-timeout=30s
monitor-max-retries=3
EOF

@ -174,15 +174,44 @@ parameters:
size of the data segments for the swift dynamic large objects
default: 5242880
auth_url:
type: string
description: >
url for kubernetes to authenticate before sending requests to neutron
must be v2 since the kubernetes backend only supports v2 at this point
username:
type: string
description: >
user account
password:
type: string
description: >
user password, not set in current implementation, only used to
fill in for Kubernetes config file
default:
ChangeMe
tenant_name:
type: string
description: >
tenant name
resources:
######################################################################
#
# network resources. allocate a network and router for our server.
# Important: the Load Balancer feature in Kubernetes requires that
# the name for the fixed_network must be "private" for the
# address lookup in Kubernetes to work properly
#
fixed_network:
type: OS::Neutron::Net
properties:
name: private
fixed_subnet:
type: OS::Neutron::Subnet
@ -282,6 +311,10 @@ resources:
fixed_subnet: {get_resource: fixed_subnet}
api_pool_id: {get_resource: api_pool}
etcd_pool_id: {get_resource: etcd_pool}
auth_url: {get_param: auth_url}
username: {get_param: username}
password: {get_param: password}
tenant_name: {get_param: tenant_name}
######################################################################
#

@ -83,6 +83,22 @@ parameters:
etcd_pool_id:
type: string
description: ID of the load balancer pool of etcd server.
auth_url:
type: string
description: >
url for kubernetes to authenticate before sending request to neutron
username:
type: string
description: >
user account
password:
type: string
description: >
user password
tenant_name:
type: string
description: >
tenant name
resources:
@ -149,6 +165,11 @@ resources:
"$FLANNEL_USE_VXLAN": {get_param: flannel_use_vxlan}
"$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr}
"$ETCD_DISCOVERY_URL": {get_param: discovery_url}
"$AUTH_URL": {get_param: auth_url}
"$USERNAME": {get_param: username}
"$PASSWORD": {get_param: password}
"$TENANT_NAME": {get_param: tenant_name}
"$CLUSTER_SUBNET": {get_param: fixed_subnet}
configure_etcd:
type: OS::Heat::SoftwareConfig
@ -156,6 +177,12 @@ resources:
group: ungrouped
config: {get_file: fragments/configure-etcd.sh}
write_kube_os_config:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: fragments/write-kube-os-config.sh}
configure_kubernetes:
type: OS::Heat::SoftwareConfig
properties:
@ -218,6 +245,7 @@ resources:
- config: {get_resource: write_heat_params}
- config: {get_resource: configure_etcd}
- config: {get_resource: kube_user}
- config: {get_resource: write_kube_os_config}
- config: {get_resource: configure_kubernetes}
- config: {get_resource: enable_services}
- config: {get_resource: write_network_config}

@ -127,7 +127,7 @@ resources:
minion_wait_condition:
type: OS::Heat::WaitCondition
depends_on: kube_minion
depends_on: kube-minion
properties:
handle: {get_resource: minion_wait_handle}
timeout: {get_param: wait_condition_timeout}
@ -261,9 +261,15 @@ resources:
######################################################################
#
# a single kubernetes minion.
# Important: the name for the heat resource kube-minion below must
# not contain "_" (underscore) because it will be used in the
# hostname. Because DNS domain name does not allow "_", the "_"
# will be converted to a "-" and this will make the hostname different
# from the Nova instance name. This in turn will break the load
# balancer feature in Kubernetes.
#
kube_minion:
kube-minion:
type: OS::Nova::Server
properties:
image: {get_param: server_image}
@ -304,7 +310,7 @@ resources:
docker_volume_attach:
type: OS::Cinder::VolumeAttachment
properties:
instance_uuid: {get_resource: kube_minion}
instance_uuid: {get_resource: kube-minion}
volume_id: {get_resource: docker_volume}
mountpoint: /dev/vdb

@ -52,6 +52,7 @@ class TestBayConductorWithK8s(base.TestCase):
'labels': {'flannel_network_cidr': '10.101.0.0/16',
'flannel_network_subnetlen': '26',
'flannel_use_vxlan': 'yes'},
}
self.bay_dict = {
'baymodel_id': 'xx-xx-xx-xx',
@ -66,6 +67,9 @@ class TestBayConductorWithK8s(base.TestCase):
'ca_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx',
'magnum_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx',
}
self.context.auth_url = 'http://192.168.10.10:5000/v3'
self.context.user_name = 'fake_user'
self.context.tenant = 'fake_tenant'
@patch('magnum.objects.BayModel.get_by_uuid')
def test_extract_template_definition(
@ -128,6 +132,9 @@ class TestBayConductorWithK8s(base.TestCase):
'http_proxy': 'http_proxy',
'https_proxy': 'https_proxy',
'no_proxy': 'no_proxy',
'auth_url': 'http://192.168.10.10:5000/v2',
'tenant_name': 'fake_tenant',
'username': 'fake_user',
}
if missing_attr is not None:
expected.pop(mapping[missing_attr], None)
@ -176,6 +183,9 @@ class TestBayConductorWithK8s(base.TestCase):
'flannel_network_cidr': '10.101.0.0/16',
'flannel_network_subnetlen': '26',
'flannel_use_vxlan': 'yes',
'auth_url': 'http://192.168.10.10:5000/v2',
'tenant_name': 'fake_tenant',
'username': 'fake_user',
}
self.assertEqual(expected, definition)
@ -221,6 +231,9 @@ class TestBayConductorWithK8s(base.TestCase):
'flannel_network_cidr': '10.101.0.0/16',
'flannel_network_subnetlen': '26',
'flannel_use_vxlan': 'yes',
'auth_url': 'http://192.168.10.10:5000/v2',
'tenant_name': 'fake_tenant',
'username': 'fake_user',
}
self.assertEqual(expected, definition)
@ -306,6 +319,9 @@ class TestBayConductorWithK8s(base.TestCase):
'flannel_network_cidr': '10.101.0.0/16',
'flannel_network_subnetlen': '26',
'flannel_use_vxlan': 'yes',
'auth_url': 'http://192.168.10.10:5000/v2',
'tenant_name': 'fake_tenant',
'username': 'fake_user',
}
self.assertIn('token', definition)
del definition['token']
@ -376,6 +392,9 @@ class TestBayConductorWithK8s(base.TestCase):
'flannel_network_cidr': '10.101.0.0/16',
'flannel_network_subnetlen': '26',
'flannel_use_vxlan': 'yes',
'auth_url': 'http://192.168.10.10:5000/v2',
'tenant_name': 'fake_tenant',
'username': 'fake_user',
}
self.assertEqual(expected, definition)
reqget.assert_called_once_with('http://etcd/test?size=1')

@ -185,6 +185,10 @@ class AtomicK8sTemplateDefinitionTestCase(base.TestCase):
mock_scale_manager.get_removal_nodes.return_value = removal_nodes
mock_get_discovery_url.return_value = 'fake_discovery_url'
mock_context.auth_url = 'http://192.168.10.10:5000/v3'
mock_context.user_name = 'fake_user'
mock_context.tenant = 'fake_tenant'
flannel_cidr = mock_baymodel.labels.get('flannel_network_cidr')
flannel_subnet = mock_baymodel.labels.get('flannel_network_subnetlen')
flannel_vxlan = mock_baymodel.labels.get('flannel_use_vxlan')
@ -199,7 +203,10 @@ class AtomicK8sTemplateDefinitionTestCase(base.TestCase):
'discovery_url': 'fake_discovery_url',
'flannel_network_cidr': flannel_cidr,
'flannel_use_vxlan': flannel_subnet,
'flannel_network_subnetlen': flannel_vxlan}}
'flannel_network_subnetlen': flannel_vxlan,
'auth_url': 'http://192.168.10.10:5000/v2',
'username': 'fake_user',
'tenant_name': 'fake_tenant'}}
mock_get_params.assert_called_once_with(mock_context, mock_baymodel,
mock_bay, **expected_kwargs)

Loading…
Cancel
Save