From 419a2285032b8fe209c95f518c32d6e5e191f45a Mon Sep 17 00:00:00 2001
From: Rick Cano
Date: Thu, 28 Jun 2018 17:56:46 -0400
Subject: [PATCH] Fixing CoreOS driver

Decoding ca on nodes

Change-Id: I4a30a348c1c0a62cb1a7b429b05878f321db92ed
---
 .../drivers/heat/k8s_coreos_template_def.py   | 132 ++++
 magnum/drivers/k8s_coreos_v1/template_def.py  |  23 +-
 .../templates/fragments/add-ext-ca-certs.yaml |  10 +-
 .../templates/fragments/configure-docker.yaml |  15 +
 .../fragments/enable-docker-mount.yaml        |  52 +++
 .../fragments/enable-kubelet-master.yaml      |   2 +-
 .../fragments/enable-kubelet-minion.yaml      |   1 -
 .../templates/fragments/wc-notify.yaml        |   6 +
 .../fragments/write-heat-params-master.yaml   |   2 +-
 .../fragments/write-heat-params.yaml          |   2 +-
 .../templates/fragments/write-kubeconfig.yaml |   1 +
 .../fragments/write-master-kubeconfig.yaml    |  21 +
 .../k8s_coreos_v1/templates/kubecluster.yaml  | 422 ++++++++++++++----
 .../k8s_coreos_v1/templates/kubemaster.yaml   | 309 ++++++++++---
 .../k8s_coreos_v1/templates/kubeminion.yaml   | 197 ++++++--
 .../handlers/test_k8s_cluster_conductor.py    |  20 +
 16 files changed, 1002 insertions(+), 213 deletions(-)
 create mode 100644 magnum/drivers/heat/k8s_coreos_template_def.py
 create mode 100644 magnum/drivers/k8s_coreos_v1/templates/fragments/enable-docker-mount.yaml
 create mode 100644 magnum/drivers/k8s_coreos_v1/templates/fragments/write-master-kubeconfig.yaml

diff --git a/magnum/drivers/heat/k8s_coreos_template_def.py b/magnum/drivers/heat/k8s_coreos_template_def.py
new file mode 100644
index 0000000000..5eb1a801fe
--- /dev/null
+++ b/magnum/drivers/heat/k8s_coreos_template_def.py
@@ -0,0 +1,132 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+from oslo_log import log as logging
+from oslo_utils import strutils
+
+from magnum.common import utils
+from magnum.common.x509 import operations as x509
+from magnum.conductor.handlers.common import cert_manager
+from magnum.drivers.heat import k8s_template_def
+from magnum.drivers.heat import template_def
+from oslo_config import cfg
+
+CONF = cfg.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class ServerAddressOutputMapping(template_def.OutputMapping):
+
+    public_ip_output_key = None
+    private_ip_output_key = None
+
+    def __init__(self, dummy_arg, cluster_attr=None):
+        self.cluster_attr = cluster_attr
+        self.heat_output = self.public_ip_output_key
+
+    def set_output(self, stack, cluster_template, cluster):
+        if not cluster_template.floating_ip_enabled:
+            self.heat_output = self.private_ip_output_key
+
+        LOG.debug("Using heat_output: %s", self.heat_output)
+        super(ServerAddressOutputMapping,
+              self).set_output(stack, cluster_template, cluster)
+
+
+class MasterAddressOutputMapping(ServerAddressOutputMapping):
+    public_ip_output_key = 'kube_masters'
+    private_ip_output_key = 'kube_masters_private'
+
+
+class NodeAddressOutputMapping(ServerAddressOutputMapping):
+    public_ip_output_key = 'kube_minions'
+    private_ip_output_key = 'kube_minions_private'
+
+
+class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
+    """Kubernetes template for a CoreOS."""
+
+    def __init__(self):
+        super(CoreOSK8sTemplateDefinition, self).__init__()
+        self.add_parameter('docker_volume_size',
+                           cluster_attr='docker_volume_size')
+        self.add_parameter('docker_storage_driver',
+                           cluster_template_attr='docker_storage_driver')
+        self.add_output('kube_minions',
+                        cluster_attr='node_addresses',
+                        mapping_type=NodeAddressOutputMapping)
+        self.add_output('kube_masters',
+                        cluster_attr='master_addresses',
+                        mapping_type=MasterAddressOutputMapping)
+
+    def get_params(self, context, cluster_template, cluster, **kwargs):
+        extra_params = kwargs.pop('extra_params', {})
+
+        extra_params['username'] = context.user_name
+        osc = self.get_osc(context)
+        extra_params['region_name'] = osc.cinder_region_name()
+
+        # set docker_volume_type
+        # use the configuration default if None provided
+        docker_volume_type = cluster.labels.get(
+            'docker_volume_type', CONF.cinder.default_docker_volume_type)
+        extra_params['docker_volume_type'] = docker_volume_type
+
+        extra_params['nodes_affinity_policy'] = \
+            CONF.cluster.nodes_affinity_policy
+
+        if cluster_template.network_driver == 'flannel':
+            extra_params["pods_network_cidr"] = \
+                cluster.labels.get('flannel_network_cidr', '10.100.0.0/16')
+        if cluster_template.network_driver == 'calico':
+            extra_params["pods_network_cidr"] = \
+                cluster.labels.get('calico_ipv4pool', '192.168.0.0/16')
+
+        label_list = ['kube_tag', 'container_infra_prefix',
+                      'availability_zone',
+                      'calico_tag', 'calico_cni_tag',
+                      'calico_kube_controllers_tag', 'calico_ipv4pool',
+                      'etcd_tag', 'flannel_tag']
+        for label in label_list:
+            label_value = cluster.labels.get(label)
+            if label_value:
+                extra_params[label] = label_value
+
+        cert_manager_api = cluster.labels.get('cert_manager_api')
+        if strutils.bool_from_string(cert_manager_api):
+            extra_params['cert_manager_api'] = cert_manager_api
+            ca_cert = cert_manager.get_cluster_ca_certificate(cluster)
+            extra_params['ca_key'] = x509.decrypt_key(
+                ca_cert.get_private_key(),
+                ca_cert.get_private_key_passphrase()).replace("\n", "\\n")
+
+        plain_openstack_ca = utils.get_openstack_ca()
+        encoded_openstack_ca = base64.b64encode(plain_openstack_ca.encode())
+        extra_params['openstack_ca_coreos'] = encoded_openstack_ca.decode()
+
+        return super(CoreOSK8sTemplateDefinition,
+                     self).get_params(context, cluster_template, cluster,
+                                      extra_params=extra_params,
+                                      **kwargs)
+
+    def get_env_files(self, cluster_template, cluster):
+        env_files = []
+
+        template_def.add_priv_net_env_file(env_files, cluster_template)
+        template_def.add_etcd_volume_env_file(env_files, cluster_template)
+        template_def.add_volume_env_file(env_files, cluster)
+        template_def.add_lb_env_file(env_files, cluster_template)
+        template_def.add_fip_env_file(env_files, cluster_template)
+
+        return env_files
diff --git a/magnum/drivers/k8s_coreos_v1/template_def.py b/magnum/drivers/k8s_coreos_v1/template_def.py
index 448dcc7a8e..194e746ab1 100644
--- a/magnum/drivers/k8s_coreos_v1/template_def.py
+++ b/magnum/drivers/k8s_coreos_v1/template_def.py
@@ -14,30 +14,13 @@
 import os
 
 import magnum.conf
-from magnum.drivers.heat import k8s_template_def
-from magnum.drivers.heat import template_def
+from magnum.drivers.heat import k8s_coreos_template_def as kctd
 
 CONF = magnum.conf.CONF
 
 
-class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
-    """Kubernetes template for CoreOS VM."""
-
-    def __init__(self):
-        super(CoreOSK8sTemplateDefinition, self).__init__()
-        self.add_output('kube_minions',
-                        cluster_attr='node_addresses')
-        self.add_output('kube_masters',
-                        cluster_attr='master_addresses')
-
-    def get_env_files(self, cluster_template, cluster):
-        env_files = []
-
-        template_def.add_priv_net_env_file(env_files, cluster_template)
-        template_def.add_lb_env_file(env_files, cluster_template)
-        template_def.add_fip_env_file(env_files, cluster_template)
-
-        return env_files
+class CoreOSK8sTemplateDefinition(kctd.CoreOSK8sTemplateDefinition):
+    """Kubernetes template for a CoreOS Atomic VM."""
 
     @property
     def driver_module_path(self):
diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/add-ext-ca-certs.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/add-ext-ca-certs.yaml
index 9cb8e304d7..1f2d86545e 100644
--- a/magnum/drivers/k8s_coreos_v1/templates/fragments/add-ext-ca-certs.yaml
+++ b/magnum/drivers/k8s_coreos_v1/templates/fragments/add-ext-ca-certs.yaml
@@ -15,6 +15,13 @@ write_files:
       [Install]
       WantedBy=multi-user.target
 
+  - path: /etc/ssl/certs/openstack-ca.pem
+    owner: "root:root"
+    permissions: "0644"
+    encoding: b64
+    content: |
+      $OPENSTACK_CA
+
   - path: /etc/sysconfig/add-ext-ca-certs.sh
     owner: "root:root"
     permissions: "0755"
@@ -22,9 +29,8 @@ write_files:
       #!/bin/sh
 
       CERT_FILE=/etc/ssl/certs/openstack-ca.pem
 
-      if [ -n "$OPENSTACK_CA" ]
+      if [ -f "$CERT_FILE" ]
       then
-        echo -ne "$OPENSTACK_CA" | tee -a ${CERT_FILE}
         chmod 0644 ${CERT_FILE}
         chown root:root ${CERT_FILE}
diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/configure-docker.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/configure-docker.yaml
index 09600fa517..c7b17679fe 100644
--- a/magnum/drivers/k8s_coreos_v1/templates/fragments/configure-docker.yaml
+++ b/magnum/drivers/k8s_coreos_v1/templates/fragments/configure-docker.yaml
@@ -1,5 +1,20 @@
 #cloud-config
 write_files:
+  - path: /etc/systemd/system/var-lib-docker.mount
+    owner: "root:root"
+    permissions: "0644"
+    content: |
+      [Unit]
+      Description=Mount ephemeral to /var/lib/docker
+
+      [Mount]
+      What=/dev/vdb
+      Where=/var/lib/docker
+      Type=ext4
+
+      [Install]
+      WantedBy=local-fs.target
+
   - path: /etc/systemd/system/configure-docker.service
     owner: "root:root"
     permissions: "0644"
     content: |
diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-docker-mount.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-docker-mount.yaml
new file mode 100644
index 0000000000..f5e4d426a1
--- /dev/null
+++ b/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-docker-mount.yaml
@@ -0,0 +1,52 @@
+#cloud-config
+write_files:
+  - path: /etc/systemd/system/var-lib-docker.mount
+    owner: "root:root"
+    permissions: "0644"
+    content: |
+      [Unit]
+      Description=Mount ephemeral to /var/lib/docker
+
+      [Mount]
+      What=/dev/vdb
+      Where=/var/lib/docker
+      Type=ext4
+
+      [Install]
+      WantedBy=local-fs.target
+
+  - path: /etc/sysconfig/enable-docker-mount.sh
+    owner: "root:root"
+    permissions: "0755"
+    content: |
+      #!/bin/sh
+      if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then
+        if [[ $(blkid -o value -s TYPE /dev/vdb) ]]; then
+          systemctl daemon-reload
+          systemctl start var-lib-docker.mount
+          systemctl enable var-lib-docker.mount
+        else
+          mkfs -t ext4 /dev/vdb
+          systemctl daemon-reload
+          systemctl start var-lib-docker.mount
+          systemctl enable var-lib-docker.mount
+        fi
+      fi
+
+  - path: /etc/systemd/system/enable-docker-mount.service
+    owner: "root:root"
+    permissions: "0644"
+    content: |
+      [Unit]
+      Description=Mount docker volume
+
+      [Service]
+      Type=oneshot
+      EnvironmentFile=/etc/sysconfig/heat-params
+      ExecStart=/etc/sysconfig/enable-docker-mount.sh
+
+      [Install]
+      RequiredBy=multi-user.target
+
+
+
diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-master.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-master.yaml
index 868c2f4cac..5365595328 100644
--- a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-master.yaml
+++ b/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-master.yaml
@@ -56,7 +56,7 @@ write_files:
       ExecStartPre=/usr/bin/mkdir -p /var/log/containers
       ExecStartPre=-/usr/bin/rkt rm --uuid-file=${uuid_file}
       ExecStart=/usr/lib/coreos/kubelet-wrapper \
-        --api-servers=http://127.0.0.1:8080 \
+        --kubeconfig=/etc/kubernetes/master-kubeconfig.yaml \
         --cni-conf-dir=/etc/kubernetes/cni/net.d \
         --network-plugin=cni \
         --hostname-override=${HOSTNAME_OVERRIDE} \
diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-minion.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-minion.yaml
index 486ebd6a56..6070215b1c 100644
--- a/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-minion.yaml
+++ b/magnum/drivers/k8s_coreos_v1/templates/fragments/enable-kubelet-minion.yaml
@@ -68,7 +68,6 @@ write_files:
       ExecStartPre=/usr/bin/mkdir -p /var/log/containers
       ExecStartPre=-/usr/bin/rkt rm --uuid-file=${uuid_file}
       ExecStart=/usr/lib/coreos/kubelet-wrapper \
-        --api-servers=${KUBE_MASTER_URI} \
         --cni-conf-dir=/etc/kubernetes/cni/net.d \
         --network-plugin=cni \
         --hostname-override=${HOSTNAME_OVERRIDE} \
diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/wc-notify.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/wc-notify.yaml
index 7857bd771d..6315bb332e 100644
--- a/magnum/drivers/k8s_coreos_v1/templates/fragments/wc-notify.yaml
+++ b/magnum/drivers/k8s_coreos_v1/templates/fragments/wc-notify.yaml
@@ -20,5 +20,11 @@ write_files:
     permissions: "0755"
     content: |
       #!/bin/bash -v
+      if [ "$VERIFY_CA" == "True" ]; then
+          VERIFY_CA=""
+      else
+          VERIFY_CA="-k"
+      fi
+
       command="$WAIT_CURL $VERIFY_CA --data-binary '{\"status\": \"SUCCESS\"}'"
       eval $(echo "$command")
diff --git 
a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params-master.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params-master.yaml index fd379d57c1..7b16fcee31 100644 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params-master.yaml +++ b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params-master.yaml @@ -12,6 +12,7 @@ write_files: KUBE_NODE_IP="$KUBE_NODE_IP" KUBE_ALLOW_PRIV="$KUBE_ALLOW_PRIV" DOCKER_VOLUME="$DOCKER_VOLUME" + DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE" DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER" NETWORK_DRIVER="$NETWORK_DRIVER" FLANNEL_NETWORK_CIDR="$FLANNEL_NETWORK_CIDR" @@ -49,4 +50,3 @@ write_files: KUBE_DASHBOARD_VERSION="$KUBE_DASHBOARD_VERSION" DNS_SERVICE_IP="$DNS_SERVICE_IP" DNS_CLUSTER_DOMAIN="$DNS_CLUSTER_DOMAIN" - OCTAVIA_ENABLED="$OCTAVIA_ENABLED" diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params.yaml index 3a40a76e54..8a376f49fe 100644 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params.yaml +++ b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-heat-params.yaml @@ -12,6 +12,7 @@ write_files: KUBE_NODE_IP="$KUBE_NODE_IP" ETCD_SERVER_IP="$ETCD_SERVER_IP" DOCKER_VOLUME="$DOCKER_VOLUME" + DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE" DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER" NETWORK_DRIVER="$NETWORK_DRIVER" REGISTRY_ENABLED="$REGISTRY_ENABLED" @@ -47,4 +48,3 @@ write_files: CONTAINER_RUNTIME="$CONTAINER_RUNTIME" DNS_SERVICE_IP="$DNS_SERVICE_IP" DNS_CLUSTER_DOMAIN="$DNS_CLUSTER_DOMAIN" - OCTAVIA_ENABLED="$OCTAVIA_ENABLED" diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-kubeconfig.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-kubeconfig.yaml index c6661bbb20..f7cc1b1025 100644 --- a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-kubeconfig.yaml +++ b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-kubeconfig.yaml @@ -10,6 +10,7 @@ write_files: clusters: - name: local cluster: + server: https://$KUBE_MASTER_IP:$KUBE_API_PORT certificate-authority: /etc/kubernetes/ssl/ca.pem users: - name: kubelet diff --git a/magnum/drivers/k8s_coreos_v1/templates/fragments/write-master-kubeconfig.yaml b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-master-kubeconfig.yaml new file mode 100644 index 0000000000..25e71e68cc --- /dev/null +++ b/magnum/drivers/k8s_coreos_v1/templates/fragments/write-master-kubeconfig.yaml @@ -0,0 +1,21 @@ +#cloud-config +merge_how: dict(recurse_array)+list(append) +write_files: + - path: /etc/kubernetes/master-kubeconfig.yaml + owner: "root:root" + permissions: "0644" + content: | + apiVersion: v1 + kind: Config + clusters: + - name: local + cluster: + server: http://127.0.0.1:8080 + users: + - name: kubelet + contexts: + - context: + cluster: local + user: kubelet + name: kubelet-context + current-context: kubelet-context diff --git a/magnum/drivers/k8s_coreos_v1/templates/kubecluster.yaml b/magnum/drivers/k8s_coreos_v1/templates/kubecluster.yaml index a8ddfd8b5a..4c8b6d5751 100644 --- a/magnum/drivers/k8s_coreos_v1/templates/kubecluster.yaml +++ b/magnum/drivers/k8s_coreos_v1/templates/kubecluster.yaml @@ -1,15 +1,19 @@ heat_template_version: 2014-10-16 description: > - This template will boot a coreos cluster with one or more minions (as - specified by the number_of_minions parameter, which defaults to 1) and one - master node. Allowing multiple masters is a work in progress. 
+ This template will boot a Kubernetes cluster with one or more + minions (as specified by the number_of_minions parameter, which + defaults to 1). parameters: + octavia_enabled: + type: string + default: true + ssh_key_name: type: string - description: name of ssh key to be provisioned on the servers + description: name of ssh key to be provisioned on our server external_network: type: string @@ -28,18 +32,17 @@ parameters: server_image: type: string - default: CoreOS - description: glance image used to boot the servers + description: glance image used to boot the server master_flavor: type: string default: m1.small - description: flavor to use when booting the server for master node + description: flavor to use when booting the server for master nodes minion_flavor: type: string default: m1.small - description: flavor to use when booting the servers for minions + description: flavor to use when booting the server for minions prometheus_monitoring: type: boolean @@ -54,14 +57,9 @@ parameters: description: > admin user password for the Grafana monitoring interface - discovery_url: - type: string - description: > - Discovery URL used for bootstrapping the etcd cluster. - dns_nameserver: type: string - description: address of a dns nameserver reachable in your environment + description: address of a DNS nameserver reachable in your environment default: 8.8.8.8 number_of_masters: @@ -85,6 +83,11 @@ parameters: address range used by kubernetes for service portals default: 10.254.0.0/16 + network_driver: + type: string + description: network driver to use for instantiating container networks + default: flannel + flannel_network_cidr: type: string description: network range for flannel overlay network @@ -99,7 +102,7 @@ parameters: type: string description: > specify the backend for flannel, default udp backend - default: "host-gw" + default: "udp" constraints: - allowed_values: ["udp", "vxlan", "host-gw"] @@ -131,19 +134,115 @@ parameters: constraints: - allowed_values: ["true", "false"] + etcd_volume_size: + type: number + description: > + size of the cinder volume for etcd storage + default: 0 + + docker_volume_size: + type: number + description: > + size of a cinder volume to allocate to docker for container/image + storage + default: 0 + + docker_volume_type: + type: string + description: > + type of a cinder volume to allocate to docker for container/image + storage + + docker_storage_driver: + type: string + description: docker storage driver name + default: "devicemapper" + + wait_condition_timeout: + type: number + description: > + timeout for the Wait Conditions + default: 6000 + minions_to_remove: type: comma_delimited_list description: > List of minions to be removed when doing an update. Individual minion may be referenced several ways: (1) The resource name (e.g. ['1', '3']), (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should - be empty when doing a create. + be empty when doing an create. default: [] - network_driver: + discovery_url: type: string - description: network driver to use for instantiating container networks - default: flannel + description: > + Discovery URL used for bootstrapping the etcd cluster. + + registry_enabled: + type: boolean + description: > + Indicates whether the docker registry is enabled. 
+ default: false + + registry_port: + type: number + description: port of registry service + default: 5000 + + swift_region: + type: string + description: region of swift service + default: "" + + registry_container: + type: string + description: > + name of swift container which docker registry stores images in + default: "container" + + registry_insecure: + type: boolean + description: > + indicates whether to skip TLS verification between registry and backend storage + default: true + + registry_chunksize: + type: number + description: > + size fo the data segments for the swift dynamic large objects + default: 5242880 + + volume_driver: + type: string + description: volume driver to use for container storage + default: "" + + region_name: + type: string + description: A logically separate section of the cluster + + username: + type: string + description: > + user account + + password: + type: string + description: > + user password, not set in current implementation, only used to + fill in for Kubernetes config file + default: + ChangeMe + hidden: true + + loadbalancing_protocol: + type: string + description: > + The protocol which is used for load balancing. If you want to change + tls_disabled option to 'True', please change this to "HTTP". + default: TCP + constraints: + - allowed_values: ["TCP", "HTTP"] tls_disabled: type: boolean @@ -152,7 +251,7 @@ parameters: kube_dashboard_enabled: type: boolean - description: whether or not to disable kubernetes dashboard + description: whether or not to enable kubernetes dashboard default: True influx_grafana_dashboard_enabled: @@ -164,15 +263,6 @@ parameters: type: boolean description: whether or not to validate certificate authority - loadbalancing_protocol: - type: string - description: > - The protocol which is used for load balancing. If you want to change - tls_disabled option to 'True', please change this to "HTTP". - default: TCP - constraints: - - allowed_values: ["TCP", "HTTP"] - kubernetes_port: type: number description: > @@ -206,43 +296,53 @@ parameters: trustee_domain_id: type: string description: domain id of the trustee - default: "" trustee_user_id: type: string description: user id of the trustee - default: "" trustee_username: type: string description: username of the trustee - default: "" trustee_password: type: string description: password of the trustee - default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee - default: "" hidden: true auth_url: type: string description: url for keystone + kube_tag: + type: string + description: tag of the k8s containers used to provision the kubernetes cluster + default: v1.9.3 + + etcd_tag: + type: string + description: tag of the etcd system container + default: v3.2.7 + + flannel_tag: + type: string + description: tag of the flannel system containers + default: v0.9.0 + kube_version: type: string description: version of kubernetes used for kubernetes cluster - default: v1.6.2_coreos.0 + default: v1.10.3_coreos.0 kube_dashboard_version: type: string description: version of kubernetes dashboard used for kubernetes cluster - default: v1.5.1 + default: v1.8.3 hyperkube_image: type: string @@ -250,38 +350,20 @@ parameters: Docker registry used for hyperkube image default: quay.io/coreos/hyperkube - registry_enabled: - type: boolean - description: > - Indicates whether the docker registry is enabled. 
- default: false - - registry_port: - type: number - description: port of registry service - default: 5000 - - wait_condition_timeout: - type: number - description: > - timeout for the Wait Conditions - default: 6000 - insecure_registry_url: type: string description: insecure registry url + default: "" + + container_infra_prefix: + type: string + description: > + prefix of container images used in the cluster, kubernetes components, + kubernetes-dashboard, coredns etc constraints: - allowed_pattern: "^$|.*/" default: "" - container_runtime: - type: string - description: > - Container runtime to use with Kubernetes. - default: "docker" - constraints: - - allowed_values: ["docker"] - dns_service_ip: type: string description: > @@ -299,6 +381,11 @@ parameters: hidden: true description: The OpenStack CA certificate to install on the node. + openstack_ca_coreos: + type: string + hidden: true + description: The OpenStack CA certificate to install on the node. + nodes_affinity_policy: type: string description: > @@ -307,17 +394,104 @@ parameters: - allowed_values: ["affinity", "anti-affinity", "soft-affinity", "soft-anti-affinity"] - octavia_enabled: - type: boolean + availability_zone: + type: string description: > - whether or not to use Octavia for LoadBalancer type service. - default: False + availability zone for master and nodes + default: "" + + cert_manager_api: + type: boolean + description: true if the kubernetes cert api manager should be enabled + default: false + + ca_key: + type: string + description: key of internal ca for the kube certificate api manager + default: "" + hidden: true + + calico_tag: + type: string + description: tag of the calico containers used to provision the calico node + default: v2.6.7 + + calico_cni_tag: + type: string + description: tag of the cni used to provision the calico node + default: v1.11.2 + + calico_kube_controllers_tag: + type: string + description: tag of the kube_controllers used to provision the calico node + default: v1.0.3 + + calico_ipv4pool: + type: string + description: Configure the IP pool from which Pod IPs will be chosen + default: "192.168.0.0/16" + + pods_network_cidr: + type: string + description: Configure the IP pool/range from which pod IPs will be chosen + + ingress_controller: + type: string + description: > + ingress controller backend to use + default: "" + + ingress_controller_role: + type: string + description: > + node role where the ingress controller backend should run + default: "ingress" + + kubelet_options: + type: string + description: > + additional options to be passed to the kubelet + default: "" + + kubeapi_options: + type: string + description: > + additional options to be passed to the api + default: "" + + kubecontroller_options: + type: string + description: > + additional options to be passed to the controller manager + default: "" + + kubeproxy_options: + type: string + description: > + additional options to be passed to the kube proxy + default: "" + + kubescheduler_options: + type: string + description: > + additional options to be passed to the scheduler + default: "" + + container_runtime: + type: string + description: > + Container runtime to use with Kubernetes. + default: "docker" + constraints: + - allowed_values: ["docker"] + + resources: ###################################################################### # - # network resources. allocate a network and router for our server. + # network resources. allocate a network and router for our server. 
# Important: the Load Balancer feature in Kubernetes requires that # the name for the fixed_network must be "private" for the # address lookup in Kubernetes to work properly @@ -349,13 +523,13 @@ resources: protocol: {get_param: loadbalancing_protocol} port: 2379 - ###################################################################### + ###################################################################### # # security groups. we need to permit network traffic of various # sorts. # - secgroup_master: + secgroup_kube_master: type: OS::Neutron::SecurityGroup properties: rules: @@ -378,8 +552,11 @@ resources: - protocol: tcp port_range_min: 6443 port_range_max: 6443 + - protocol: tcp + port_range_min: 30000 + port_range_max: 32767 - secgroup_minion_all_open: + secgroup_kube_minion: type: OS::Neutron::SecurityGroup properties: rules: @@ -433,7 +610,7 @@ resources: ###################################################################### # # kubernetes masters. This is a resource group that will create - # master. + # masters. # kube_masters: @@ -449,6 +626,8 @@ resources: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'master', '%index%'] + prometheus_monitoring: {get_param: prometheus_monitoring} + grafana_admin_passwd: {get_param: grafana_admin_passwd} api_public_address: {get_attr: [api_lb, floating_address]} api_private_address: {get_attr: [api_lb, address]} ssh_key_name: {get_param: ssh_key_name} @@ -456,6 +635,12 @@ resources: master_flavor: {get_param: master_flavor} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} + etcd_volume_size: {get_param: etcd_volume_size} + docker_volume_size: {get_param: docker_volume_size} + docker_volume_type: {get_param: docker_volume_type} + docker_storage_driver: {get_param: docker_storage_driver} + wait_condition_timeout: {get_param: wait_condition_timeout} + network_driver: {get_param: network_driver} flannel_network_cidr: {get_param: flannel_network_cidr} flannel_network_subnetlen: {get_param: flannel_network_subnetlen} flannel_backend: {get_param: flannel_backend} @@ -463,26 +648,29 @@ resources: system_pods_timeout: {get_param: system_pods_timeout} portal_network_cidr: {get_param: portal_network_cidr} admission_control_list: {get_param: admission_control_list} + discovery_url: {get_param: discovery_url} + cluster_uuid: {get_param: cluster_uuid} + magnum_url: {get_param: magnum_url} + volume_driver: {get_param: volume_driver} fixed_network: {get_attr: [network, fixed_network]} fixed_subnet: {get_attr: [network, fixed_subnet]} - discovery_url: {get_param: discovery_url} - network_driver: {get_param: network_driver} + api_pool_id: {get_attr: [api_lb, pool_id]} + etcd_pool_id: {get_attr: [etcd_lb, pool_id]} + username: {get_param: username} + password: {get_param: password} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} kube_dashboard_enabled: {get_param: kube_dashboard_enabled} influx_grafana_dashboard_enabled: {get_param: influx_grafana_dashboard_enabled} verify_ca: {get_param: verify_ca} - secgroup_kube_master_id: {get_resource: secgroup_master} + secgroup_kube_master_id: {get_resource: secgroup_kube_master} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} + kube_tag: {get_param: kube_tag} kube_version: {get_param: kube_version} + etcd_tag: {get_param: etcd_tag} kube_dashboard_version: {get_param: kube_dashboard_version} - wait_condition_timeout: {get_param: wait_condition_timeout} - cluster_uuid: {get_param: 
cluster_uuid} - api_pool_id: {get_attr: [api_lb, pool_id]} - etcd_pool_id: {get_attr: [etcd_lb, pool_id]} - magnum_url: {get_param: magnum_url} trustee_user_id: {get_param: trustee_user_id} trustee_password: {get_param: trustee_password} trust_id: {get_param: trust_id} @@ -490,18 +678,31 @@ resources: hyperkube_image: {get_param: hyperkube_image} insecure_registry_url: {get_param: insecure_registry_url} container_runtime: {get_param: container_runtime} - prometheus_monitoring: {get_param: prometheus_monitoring} - grafana_admin_passwd: {get_param: grafana_admin_passwd} + container_infra_prefix: {get_param: container_infra_prefix} etcd_lb_vip: {get_attr: [etcd_lb, address]} dns_service_ip: {get_param: dns_service_ip} dns_cluster_domain: {get_param: dns_cluster_domain} - openstack_ca: {get_param: openstack_ca} + openstack_ca: {get_param: openstack_ca_coreos} nodes_server_group_id: {get_resource: nodes_server_group} - octavia_enabled: {get_param: octavia_enabled} + availability_zone: {get_param: availability_zone} + ca_key: {get_param: ca_key} + cert_manager_api: {get_param: cert_manager_api} + calico_tag: {get_param: calico_tag} + calico_cni_tag: {get_param: calico_cni_tag} + calico_kube_controllers_tag: {get_param: calico_kube_controllers_tag} + calico_ipv4pool: {get_param: calico_ipv4pool} + pods_network_cidr: {get_param: pods_network_cidr} + ingress_controller: {get_param: ingress_controller} + ingress_controller_role: {get_param: ingress_controller_role} + kubelet_options: {get_param: kubelet_options} + kubeapi_options: {get_param: kubeapi_options} + kubeproxy_options: {get_param: kubeproxy_options} + kubecontroller_options: {get_param: kubecontroller_options} + kubescheduler_options: {get_param: kubescheduler_options} ###################################################################### # - # kubernetes minions. This is a resource group that will initially + # kubernetes minions. This is an resource group that will initially # create minions, and needs to be manually scaled. 
# @@ -509,7 +710,6 @@ resources: type: OS::Heat::ResourceGroup depends_on: - network - - kube_masters properties: count: {get_param: number_of_minions} removal_policies: [{resource_list: {get_param: minions_to_remove}}] @@ -520,41 +720,62 @@ resources: list_join: - '-' - [{ get_param: 'OS::stack_name' }, 'minion', '%index%'] + prometheus_monitoring: {get_param: prometheus_monitoring} ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} minion_flavor: {get_param: minion_flavor} fixed_network: {get_attr: [network, fixed_network]} fixed_subnet: {get_attr: [network, fixed_subnet]} + network_driver: {get_param: network_driver} flannel_network_cidr: {get_param: flannel_network_cidr} kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]} etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} - network_driver: {get_param: network_driver} + docker_volume_size: {get_param: docker_volume_size} + docker_volume_type: {get_param: docker_volume_type} + docker_storage_driver: {get_param: docker_storage_driver} + wait_condition_timeout: {get_param: wait_condition_timeout} + registry_enabled: {get_param: registry_enabled} + registry_port: {get_param: registry_port} + swift_region: {get_param: swift_region} + registry_container: {get_param: registry_container} + registry_insecure: {get_param: registry_insecure} + registry_chunksize: {get_param: registry_chunksize} + cluster_uuid: {get_param: cluster_uuid} + magnum_url: {get_param: magnum_url} + volume_driver: {get_param: volume_driver} + region_name: {get_param: region_name} + auth_url: {get_param: auth_url} + hyperkube_image: {get_param: hyperkube_image} + username: {get_param: username} + password: {get_param: password} kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} verify_ca: {get_param: verify_ca} - secgroup_kube_minion_id: {get_resource: secgroup_minion_all_open} + secgroup_kube_minion_id: {get_resource: secgroup_kube_minion} http_proxy: {get_param: http_proxy} https_proxy: {get_param: https_proxy} no_proxy: {get_param: no_proxy} + kube_tag: {get_param: kube_tag} kube_version: {get_param: kube_version} - wait_condition_timeout: {get_param: wait_condition_timeout} - cluster_uuid: {get_param: cluster_uuid} - magnum_url: {get_param: magnum_url} + flannel_tag: {get_param: flannel_tag} trustee_user_id: {get_param: trustee_user_id} + trustee_username: {get_param: trustee_username} trustee_password: {get_param: trustee_password} + trustee_domain_id: {get_param: trustee_domain_id} trust_id: {get_param: trust_id} - auth_url: {get_param: auth_url} - hyperkube_image: {get_param: hyperkube_image} insecure_registry_url: {get_param: insecure_registry_url} container_runtime: {get_param: container_runtime} - prometheus_monitoring: {get_param: prometheus_monitoring} + container_infra_prefix: {get_param: container_infra_prefix} dns_service_ip: {get_param: dns_service_ip} dns_cluster_domain: {get_param: dns_cluster_domain} - openstack_ca: {get_param: openstack_ca} + openstack_ca: {get_param: openstack_ca_coreos} nodes_server_group_id: {get_resource: nodes_server_group} - octavia_enabled: {get_param: octavia_enabled} + availability_zone: {get_param: availability_zone} + pods_network_cidr: {get_param: pods_network_cidr} + kubelet_options: {get_param: kubelet_options} + kubeproxy_options: {get_param: kubeproxy_options} outputs: @@ -568,6 +789,16 @@ outputs: This is the API endpoint of the Kubernetes 
cluster. Use this to access the Kubernetes API. + registry_address: + value: + str_replace: + template: localhost:port + params: + port: {get_param: registry_port} + description: + This is the url of docker registry server where you can store docker + images. + kube_masters_private: value: {get_attr: [kube_masters, kube_master_ip]} description: > @@ -577,8 +808,7 @@ outputs: value: {get_attr: [kube_masters, kube_master_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes masters. - Use these IP addresses to log in to the Kubernetes masters via ssh or to access - the Kubernetes API. + Use these IP addresses to log in to the Kubernetes masters via ssh. kube_minions_private: value: {get_attr: [kube_minions, kube_minion_ip]} diff --git a/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml b/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml index a25a1d5aee..cc7dba024e 100644 --- a/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml +++ b/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml @@ -1,9 +1,9 @@ heat_template_version: 2014-10-16 description: > - This is a nested stack that defines a Kubernetes master. This stack is - included by an ResourceGroup resource in the parent template - (kubeclusters.yaml). + This is a nested stack that defines a single Kubernetes master, This stack is + included by an ResourceGroup resource in the parent template + (kubecluster.yaml). parameters: @@ -27,19 +27,6 @@ parameters: type: string description: uuid/name of a network to use for floating ip addresses - discovery_url: - type: string - description: > - Discovery URL used for bootstrapping the etcd cluster. - - api_pool_id: - type: string - description: ID of the load balancer pool of k8s API server. - - etcd_pool_id: - type: string - description: ID of the load balancer pool of etcd server. - portal_network_cidr: type: string description: > @@ -52,6 +39,32 @@ parameters: constraints: - allowed_values: ["true", "false"] + etcd_volume_size: + type: number + description: > + size of a cinder volume to allocate for etcd storage + + docker_volume_size: + type: number + description: > + size of a cinder volume to allocate to docker for container/image + storage + + docker_volume_type: + type: string + description: > + type of a cinder volume to allocate to docker for container/image + storage + + docker_storage_driver: + type: string + description: docker storage driver name + default: "devicemapper" + + volume_driver: + type: string + description: volume driver to use for container storage + flannel_network_cidr: type: string description: network range for flannel overlay network @@ -86,26 +99,10 @@ parameters: description: > List of admission control plugins to activate - fixed_network: + discovery_url: type: string - description: Network from which to allocate fixed addresses. - - fixed_subnet: - type: string - description: Subnet from which to allocate fixed addresses. - - wait_condition_timeout: - type: number - description : > - timeout for the Wait Conditions - - secgroup_kube_master_id: - type: string - description: ID of the security group for kubernetes master. - - network_driver: - type: string - description: network driver to use for instantiating container networks + description: > + Discovery URL used for bootstrapping the etcd cluster. 
tls_disabled: type: boolean @@ -117,7 +114,7 @@ parameters: influx_grafana_dashboard_enabled: type: boolean - description: whether or not to disable kubernetes dashboard + description: Enable influxdb with grafana dashboard for data from heapster verify_ca: type: boolean @@ -128,25 +125,15 @@ parameters: description: > The port which are used by kube-apiserver to provide Kubernetes service. - default: 6443 - - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - - kube_dashboard_version: - type: string - description: version of kubernetes dashboard used for kubernetes cluster - - hyperkube_image: - type: string - description: > - Docker registry used for hyperkube image cluster_uuid: type: string description: identifier for the cluster this template is generating + magnum_url: + type: string + description: endpoint to retrieve TLS certs from + prometheus_monitoring: type: boolean description: > @@ -158,10 +145,6 @@ parameters: description: > admin user password for the Grafana monitoring interface - magnum_url: - type: string - description: endpoint to retrieve TLS certs from - api_public_address: type: string description: Public IP address of the Kubernetes master server. @@ -172,6 +155,50 @@ parameters: description: Private IP address of the Kubernetes master server. default: "" + fixed_network: + type: string + description: Network from which to allocate fixed addresses. + + fixed_subnet: + type: string + description: Subnet from which to allocate fixed addresses. + + network_driver: + type: string + description: network driver to use for instantiating container networks + + wait_condition_timeout: + type: number + description : > + timeout for the Wait Conditions + + secgroup_kube_master_id: + type: string + description: ID of the security group for kubernetes master. + + api_pool_id: + type: string + description: ID of the load balancer pool of k8s API server. + + etcd_pool_id: + type: string + description: ID of the load balancer pool of etcd server. + + auth_url: + type: string + description: > + url for kubernetes to authenticate + + username: + type: string + description: > + user account + + password: + type: string + description: > + user password + http_proxy: type: string description: http proxy address for docker @@ -184,35 +211,45 @@ parameters: type: string description: no proxies for docker + kube_tag: + type: string + description: tag of the k8s containers used to provision the kubernetes cluster + + etcd_tag: + type: string + description: tag of the etcd system container + + kube_version: + type: string + description: version of kubernetes used for kubernetes cluster + + kube_dashboard_version: + type: string + description: version of kubernetes dashboard used for kubernetes cluster + trustee_user_id: type: string description: user id of the trustee - default: "" trustee_password: type: string description: password of the trustee - default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee - default: "" hidden: true - auth_url: - type: string - description: url for keystone - insecure_registry_url: type: string description: insecure registry url - container_runtime: + container_infra_prefix: type: string description: > - Container runtime to use with Kubernetes. 
+ prefix of container images used in the cluster, kubernetes components, + kubernetes-dashboard, coredns etc etcd_lb_vip: type: string @@ -233,18 +270,101 @@ parameters: openstack_ca: type: string description: The OpenStack CA certificate to install on the node. + nodes_server_group_id: type: string description: ID of the server group for kubernetes cluster nodes. + availability_zone: + type: string + description: > + availability zone for master and nodes + default: "" + + ca_key: + type: string + description: key of internal ca for the kube certificate api manager + hidden: true + + cert_manager_api: + type: boolean + description: true if the kubernetes cert api manager should be enabled + default: false + + calico_tag: + type: string + description: tag of the calico containers used to provision the calico node + + calico_cni_tag: + type: string + description: tag of the cni used to provision the calico node + + calico_kube_controllers_tag: + type: string + description: tag of the kube_controllers used to provision the calico node + + calico_ipv4pool: + type: string + description: Configure the IP pool from which Pod IPs will be chosen + + pods_network_cidr: + type: string + description: Configure the IP pool/range from which pod IPs will be chosen + + ingress_controller: + type: string + description: > + ingress controller backend to use + + ingress_controller_role: + type: string + description: > + node role where the ingress controller should run + + kubelet_options: + type: string + description: > + additional options to be passed to the kubelet + + kubeapi_options: + type: string + description: > + additional options to be passed to the api + + kubecontroller_options: + type: string + description: > + additional options to be passed to the controller manager + + kubeproxy_options: + type: string + description: > + additional options to be passed to the kube proxy + + kubescheduler_options: + type: string + description: > + additional options to be passed to the scheduler + octavia_enabled: type: boolean description: > whether or not to use Octavia for LoadBalancer type service. default: False + container_runtime: + type: string + description: > + Container runtime to use with Kubernetes. 
+ + hyperkube_image: + type: string + description: > + Docker registry used for hyperkube image + resources: + master_wait_handle: type: OS::Heat::WaitConditionHandle @@ -288,6 +408,10 @@ resources: "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} + "$ETCD_VOLUME": {get_resource: etcd_volume} + "$ETCD_VOLUME_SIZE": {get_param: etcd_volume_size} + "$DOCKER_VOLUME": {get_resource: docker_volume} + "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} "$FLANNEL_BACKEND": {get_param: flannel_backend} @@ -303,7 +427,7 @@ resources: "$TLS_DISABLED": {get_param: tls_disabled} "$VERIFY_CA": {get_param: verify_ca} "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled} - "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: enable_influx_grafana_dashboard} + "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: influx_grafana_dashboard_enabled} "$KUBE_VERSION": {get_param: kube_version} "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version} "$CLUSTER_UUID": {get_param: cluster_uuid} @@ -330,6 +454,19 @@ resources: "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain} "$OCTAVIA_ENABLED": {get_param: octavia_enabled} + write_kubeconfig: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/write-master-kubeconfig.yaml} + + enable_docker_mount: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/enable-docker-mount.yaml} + + add_ext_ca_certs: type: OS::Heat::SoftwareConfig properties: @@ -439,6 +576,8 @@ resources: template: | $add_ext_ca_certs $write_heat_params + $write_kubeconfig + $enable_docker_mount $make_cert $configure_docker $add_proxy @@ -460,6 +599,8 @@ resources: command: "start" - name: "make-cert.service" command: "start" + - name: "enable-docker-mount.service" + command: "start" - name: "configure-docker.service" command: "start" - name: "add-proxy.service" @@ -491,6 +632,8 @@ resources: params: "$add_ext_ca_certs": {get_attr: [add_ext_ca_certs, config]} "$write_heat_params": {get_attr: [write_heat_params, config]} + "$write_kubeconfig": {get_attr: [write_kubeconfig, config]} + "$enable_docker_mount": {get_attr: [enable_docker_mount, config]} "$make_cert": {get_attr: [make_cert, config]} "$configure_docker": {get_attr: [configure_docker, config]} "$add_proxy": {get_attr: [add_proxy, config]} @@ -562,6 +705,44 @@ resources: subnet: { get_param: fixed_subnet } protocol_port: 2379 + ###################################################################### + # + # etcd storage. This allocates a cinder volume and attaches it + # to the master. + # + + etcd_volume: + type: Magnum::Optional::Etcd::Volume + properties: + size: {get_param: etcd_volume_size} + + etcd_volume_attach: + type: Magnum::Optional::Etcd::VolumeAttachment + properties: + instance_uuid: {get_resource: kube-master} + volume_id: {get_resource: etcd_volume} + mountpoint: /dev/vdc + + ###################################################################### + # + # docker storage. This allocates a cinder volume and attaches it + # to the minion. 
+ # + + docker_volume: + type: Magnum::Optional::Cinder::Volume + properties: + size: {get_param: docker_volume_size} + volume_type: {get_param: docker_volume_type} + + docker_volume_attach: + type: Magnum::Optional::Cinder::VolumeAttachment + properties: + instance_uuid: {get_resource: kube-master} + volume_id: {get_resource: docker_volume} + mountpoint: /dev/vdb + + outputs: kube_master_ip: diff --git a/magnum/drivers/k8s_coreos_v1/templates/kubeminion.yaml b/magnum/drivers/k8s_coreos_v1/templates/kubeminion.yaml index 749c00ab45..2bbea9927f 100644 --- a/magnum/drivers/k8s_coreos_v1/templates/kubeminion.yaml +++ b/magnum/drivers/k8s_coreos_v1/templates/kubeminion.yaml @@ -1,9 +1,9 @@ heat_template_version: 2014-10-16 description: > - This is a nested stack that defines a single Kubernetes minion, - based on a CoreOS cloud image. This stack is included by a ResourceGroup - resource in the parent template (kubecluster.yaml). + This is a nested stack that defines a single Kubernetes minion, This stack is + included by an AutoScalingGroup resource in the parent template + (kubecluster.yaml). parameters: @@ -34,9 +34,22 @@ parameters: constraints: - allowed_values: ["true", "false"] - network_driver: + docker_volume_size: + type: number + description: > + size of a cinder volume to allocate to docker for container/image + storage + + docker_volume_type: type: string - description: network driver to use for instantiating container networks + description: > + type of a cinder volume to allocate to docker for container/image + storage + + docker_storage_driver: + type: string + description: docker storage driver name + default: "devicemapper" tls_disabled: type: boolean @@ -51,7 +64,6 @@ parameters: description: > The port which are used by kube-apiserver to provide Kubernetes service. - default: 6443 cluster_uuid: type: string @@ -61,14 +73,10 @@ parameters: type: string description: endpoint to retrieve TLS certs from - kube_version: - type: string - description: version of kubernetes used for kubernetes cluster - - hyperkube_image: - type: string + prometheus_monitoring: + type: boolean description: > - Docker registry used for hyperkube image + whether or not to have the node-exporter running on the node kube_master_ip: type: string @@ -86,19 +94,71 @@ parameters: type: string description: Subnet from which to allocate fixed addresses. + network_driver: + type: string + description: network driver to use for instantiating container networks + flannel_network_cidr: type: string description: network range for flannel overlay network wait_condition_timeout: type: number - description: > + description : > timeout for the Wait Conditions + registry_enabled: + type: boolean + description: > + Indicates whether the docker registry is enabled. + + registry_port: + type: number + description: port of registry service + + swift_region: + type: string + description: region of swift service + + registry_container: + type: string + description: > + name of swift container which docker registry stores images in + + registry_insecure: + type: boolean + description: > + indicates whether to skip TLS verification between registry and backend storage + + registry_chunksize: + type: number + description: > + size fo the data segments for the swift dynamic large objects + secgroup_kube_minion_id: type: string description: ID of the security group for kubernetes minion. 
+ volume_driver: + type: string + description: volume driver to use for container storage + + region_name: + type: string + description: A logically separate section of the cluster + + username: + type: string + description: > + user account + + password: + type: string + description: > + user password, not set in current implementation, only used to + fill in for Kubernetes config file + hidden: true + http_proxy: type: string description: http proxy address for docker @@ -111,40 +171,55 @@ parameters: type: string description: no proxies for docker + kube_tag: + type: string + description: tag of the k8s containers used to provision the kubernetes cluster + + flannel_tag: + type: string + description: tag of the flannel system containers + + kube_version: + type: string + description: version of kubernetes used for kubernetes cluster + + trustee_domain_id: + type: string + description: domain id of the trustee + trustee_user_id: type: string description: user id of the trustee - default: "" + + trustee_username: + type: string + description: username of the trustee trustee_password: type: string description: password of the trustee - default: "" hidden: true trust_id: type: string description: id of the trust which is used by the trustee - default: "" hidden: true auth_url: type: string - description: url for keystone + description: > + url for keystone, must be v2 since k8s backend only support v2 + at this point insecure_registry_url: type: string description: insecure registry url - container_runtime: + container_infra_prefix: type: string description: > - Container runtime to use with Kubernetes. - - prometheus_monitoring: - type: boolean - description: > - whether or not to have the node-exporter running on the node + prefix of container images used in the cluster, kubernetes components, + kubernetes-dashboard, coredns etc dns_service_ip: type: string @@ -164,14 +239,45 @@ parameters: type: string description: ID of the server group for kubernetes cluster nodes. + availability_zone: + type: string + description: > + availability zone for master and nodes + default: "" + + pods_network_cidr: + type: string + description: Configure the IP pool/range from which pod IPs will be chosen + + kubelet_options: + type: string + description: > + additional options to be passed to the kubelet + + kubeproxy_options: + type: string + description: > + additional options to be passed to the kube proxy + octavia_enabled: type: boolean description: > whether or not to use Octavia for LoadBalancer type service. default: False + container_runtime: + type: string + description: > + Container runtime to use with Kubernetes. 
+ + hyperkube_image: + type: string + description: > + Docker registry used for hyperkube image + resources: + minion_wait_handle: type: OS::Heat::WaitConditionHandle @@ -197,6 +303,8 @@ resources: template: {get_file: fragments/write-heat-params.yaml} params: "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} + "$DOCKER_VOLUME": {get_resource: docker_volume} + "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} "$KUBE_MASTER_IP": {get_param: kube_master_ip} "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_minion_floating, floating_ip_address]} "$KUBE_NODE_IP": {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} @@ -240,11 +348,23 @@ resources: $OPENSTACK_CA: {get_param: openstack_ca} template: {get_file: fragments/add-ext-ca-certs.yaml} + enable_docker_mount: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/enable-docker-mount.yaml} + write_kubeconfig: type: OS::Heat::SoftwareConfig properties: group: ungrouped - config: {get_file: fragments/write-kubeconfig.yaml} + config: + str_replace: + template: {get_file: fragments/write-kubeconfig.yaml} + params: + "$KUBE_API_PORT": {get_param: kubernetes_port} + "$KUBE_MASTER_IP": {get_param: kube_master_ip} + make_cert: type: OS::Heat::SoftwareConfig @@ -297,6 +417,7 @@ resources: template: | $add_ext_ca_certs $write_heat_params + $enable_docker_mount $write_kubeconfig $make_cert $configure_docker @@ -311,6 +432,8 @@ resources: command: "start" - name: "make-cert.service" command: "start" + - name: "enable-docker-mount.service" + command: "start" - name: "configure-docker.service" command: "start" - name: "add-proxy.service" @@ -327,6 +450,7 @@ resources: "$add_ext_ca_certs": {get_attr: [add_ext_ca_certs, config]} "$write_heat_params": {get_attr: [write_heat_params, config]} "$write_kubeconfig": {get_attr: [write_kubeconfig, config]} + "$enable_docker_mount": {get_attr: [enable_docker_mount, config]} "$make_cert": {get_attr: [make_cert, config]} "$configure_docker": {get_attr: [configure_docker, config]} "$add_proxy": {get_attr: [add_proxy, config]} @@ -369,12 +493,31 @@ resources: floating_network: {get_param: external_network} port_id: {get_resource: kube_minion_eth0} + ###################################################################### + # + # docker storage. This allocates a cinder volume and attaches it + # to the minion. + # + + docker_volume: + type: Magnum::Optional::Cinder::Volume + properties: + size: {get_param: docker_volume_size} + volume_type: {get_param: docker_volume_type} + + docker_volume_attach: + type: Magnum::Optional::Cinder::VolumeAttachment + properties: + instance_uuid: {get_resource: kube-minion} + volume_id: {get_resource: docker_volume} + mountpoint: /dev/vdb + outputs: kube_minion_ip: value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} description: > - This is the "private" IP address of the Kubernetes minion node. + This is the "public" IP address of the Kubernetes minion node. 
kube_minion_external_ip: value: {get_attr: [kube_minion_floating, floating_ip_address]} diff --git a/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py index a793c0907e..8a6989be1b 100644 --- a/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py +++ b/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py @@ -518,7 +518,12 @@ class TestClusterConductorWithK8s(base.TestCase): 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', + 'availability_zone': 'az_1', + 'nodes_affinity_policy': 'soft-anti-affinity', 'dns_nameserver': 'dns_nameserver', + 'docker_storage_driver': 'devicemapper', + 'docker_volume_size': 20, + 'docker_volume_type': 'lvmdriver-1', 'server_image': 'image_id', 'minion_flavor': 'flavor_id', 'master_flavor': 'master_flavor_id', @@ -538,6 +543,7 @@ class TestClusterConductorWithK8s(base.TestCase): 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', + 'region_name': 'RegionOne', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', @@ -547,6 +553,7 @@ class TestClusterConductorWithK8s(base.TestCase): 'trustee_username': 'fake_trustee', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', + 'username': 'fake_user', 'trust_id': '', 'auth_url': 'http://192.168.10.10:5000/v3', 'cluster_uuid': self.cluster_dict['uuid'], @@ -555,6 +562,7 @@ class TestClusterConductorWithK8s(base.TestCase): 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', + 'openstack_ca_coreos': '', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', @@ -568,6 +576,8 @@ class TestClusterConductorWithK8s(base.TestCase): self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', + '../../common/templates/environments/no_etcd_volume.yaml', + '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml'], env_files) @@ -599,10 +609,14 @@ class TestClusterConductorWithK8s(base.TestCase): expected = { 'ssh_key_name': 'keypair_id', + 'availability_zone': 'az_1', 'external_network': 'external_network_id', 'fixed_network': 'fixed_network', 'fixed_subnet': 'fixed_subnet', 'dns_nameserver': 'dns_nameserver', + 'docker_storage_driver': u'devicemapper', + 'docker_volume_size': 20, + 'docker_volume_type': u'lvmdriver-1', 'server_image': 'image_id', 'minion_flavor': 'flavor_id', 'master_flavor': 'master_flavor_id', @@ -615,6 +629,7 @@ class TestClusterConductorWithK8s(base.TestCase): 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', + 'nodes_affinity_policy': 'soft-anti-affinity', 'flannel_network_cidr': '10.101.0.0/16', 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', @@ -622,6 +637,7 @@ class TestClusterConductorWithK8s(base.TestCase): 'system_pods_timeout': '1', 'admission_control_list': 'fake_list', 'prometheus_monitoring': 'False', + 'region_name': self.mock_osc.cinder_region_name.return_value, 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'influx_grafana_dashboard_enabled': 'True', @@ -629,6 +645,7 @@ class TestClusterConductorWithK8s(base.TestCase): 'registry_enabled': False, 
'trustee_domain_id': self.mock_keystone.trustee_domain_id, 'trustee_username': 'fake_trustee', + 'username': 'fake_user', 'trustee_password': 'fake_trustee_password', 'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656', 'trust_id': '', @@ -639,6 +656,7 @@ class TestClusterConductorWithK8s(base.TestCase): 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', + 'openstack_ca_coreos': '', 'cert_manager_api': 'False', 'ingress_controller': 'i-controller', 'ingress_controller_role': 'i-controller-role', @@ -652,6 +670,8 @@ class TestClusterConductorWithK8s(base.TestCase): self.assertEqual(expected, definition) self.assertEqual( ['../../common/templates/environments/no_private_network.yaml', + '../../common/templates/environments/no_etcd_volume.yaml', + '../../common/templates/environments/with_volume.yaml', '../../common/templates/environments/no_master_lb.yaml', '../../common/templates/environments/disable_floating_ip.yaml'], env_files)
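
A minimal sketch of the CA round-trip this change relies on (illustrative values only, not part of the patch): the conductor base64-encodes the OpenStack CA bundle into the new openstack_ca_coreos parameter, as in get_params() above, so the multi-line PEM survives Heat's str_replace substitution, and cloud-init decodes it on the node because the write_files entry for /etc/ssl/certs/openstack-ca.pem declares "encoding: b64".

    import base64

    # Conductor side (mirrors get_params() in k8s_coreos_template_def.py):
    # encode the PEM text so it can be passed as a single Heat parameter.
    # The certificate body here is a made-up placeholder.
    plain_ca = ("-----BEGIN CERTIFICATE-----\n"
                "MIIBexampleonly\n"
                "-----END CERTIFICATE-----\n")
    openstack_ca_coreos = base64.b64encode(plain_ca.encode()).decode()

    # Node side: cloud-init decodes write_files content marked "encoding: b64",
    # so the file written to disk holds the original PEM text again.
    restored = base64.b64decode(openstack_ca_coreos).decode()
    assert restored == plain_ca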