Support Fedora CoreOS 30

Add a fedora coreos driver. To deploy clusters with fedora coreos, operators
or users need to add the property os_distro=fedora-coreos to the image. The
scripts that deploy kubernetes on top are the same as for fedora atomic.
Note that this driver has SELinux enabled.
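
For example, an operator can upload the image and create a cluster template
with the openstack client (the image file, flavor and network names below
are illustrative):

  openstack image create fedora-coreos-30 \
      --disk-format=qcow2 \
      --container-format=bare \
      --file=fedora-coreos-30.qcow2 \
      --property os_distro='fedora-coreos'

  openstack coe cluster template create kubernetes-fcos \
      --image fedora-coreos-30 \
      --coe kubernetes \
      --external-network public \
      --master-flavor m1.medium \
      --flavor m1.medium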

On startup, the heat-container-agent uses a workaround to copy the
SoftwareDeployment credentials to /var/lib/cloud/data/cfn-init-data.
The fedora coreos driver requires Heat Train for its Ignition support.

Task: 29968
Story: 2005201

Signed-off-by: Spyros Trigazis <spyridon.trigazis@cern.ch>

Change-Id: Iffcaa68d385b1b829b577ebce2df465073dfb5a1
Spyros Trigazis 2019-10-07 12:53:27 +00:00
parent 3674b3617a
commit 73dc57c319
16 changed files with 3155 additions and 80 deletions


@ -42,6 +42,9 @@ EOF
}
systemctl restart NetworkManager
fi
elif [ "$NETWORK_DRIVER" = "flannel" ]; then
$ssh_cmd modprobe vxlan
echo "vxlan" > /etc/modules-load.d/vxlan.conf
fi
@ -182,6 +185,7 @@ ExecStart=/bin/bash -c '/usr/bin/podman run --name kubelet \\
--volume /etc/ssl/certs:/etc/ssl/certs:ro \\
--volume /lib/modules:/lib/modules:ro \\
--volume /run:/run \\
--volume /dev:/dev \\
--volume /sys/fs/cgroup:/sys/fs/cgroup:ro \\
--volume /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \\
--volume /etc/pki/tls/certs:/usr/share/ca-certificates:ro \\


@ -44,6 +44,9 @@ EOF
}
$ssh_cmd systemctl restart NetworkManager
fi
elif [ "$NETWORK_DRIVER" = "flannel" ]; then
$ssh_cmd modprobe vxlan
echo "vxlan" > /etc/modules-load.d/vxlan.conf
fi
mkdir -p /srv/magnum/kubernetes/
@ -81,6 +84,7 @@ ExecStart=/bin/bash -c '/usr/bin/podman run --name kubelet \\
--volume /etc/ssl/certs:/etc/ssl/certs:ro \\
--volume /lib/modules:/lib/modules:ro \\
--volume /run:/run \\
--volume /dev:/dev \\
--volume /sys/fs/cgroup:/sys/fs/cgroup:ro \\
--volume /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \\
--volume /etc/pki/tls/certs:/usr/share/ca-certificates:ro \\


@ -175,6 +175,11 @@ spec:
tier: node
app: flannel
spec:
# https://pagure.io/atomic/kubernetes-sig/issue/3
# https://danwalsh.livejournal.com/74754.html
securityContext:
seLinuxOptions:
type: "spc_t"
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64


@ -13,6 +13,7 @@
import abc
import collections
import os
from pbr.version import SemanticVersion as SV
import six
from string import ascii_letters
@ -320,6 +321,86 @@ class KubernetesDriver(HeatDriver):
raise NotImplementedError("Must implement 'upgrade_cluster'")
class FedoraKubernetesDriver(KubernetesDriver):
"""Base driver for Kubernetes clusters."""
def upgrade_cluster(self, context, cluster, cluster_template,
max_batch_size, nodegroup, scale_manager=None,
rollback=False):
osc = clients.OpenStackClients(context)
_, heat_params, _ = (
self._extract_template_definition(context, cluster,
scale_manager=scale_manager))
# Extract labels/tags from cluster not template
# There are some version tags that are not declared in labels explicitly,
# so we need to get them from heat_params based on the labels given in
# new cluster template.
current_addons = {}
new_addons = {}
for label in cluster_template.labels:
# This is the upgrade API, so we don't introduce new stuff by this API,
# but just focus on the version change.
new_addons[label] = cluster_template.labels[label]
if ((label.endswith('_tag') or
label.endswith('_version')) and label in heat_params):
current_addons[label] = heat_params[label]
try:
if (SV.from_pip_string(new_addons[label]) <
SV.from_pip_string(current_addons[label])):
raise exception.InvalidVersion(tag=label)
except Exception as e:
# NOTE(flwang): Different cloud providers may use different
# tag/version formats which may not be parseable by
# SemanticVersion. For this case, let's just skip it.
LOG.debug("Failed to parse tag/version %s", str(e))
heat_params["master_image"] = cluster_template.image_id
heat_params["minion_image"] = cluster_template.image_id
# NOTE(flwang): Overwrite the kube_tag as well to avoid a server
# rebuild then do the k8s upgrade again, when both image id and
# kube_tag changed
heat_params["kube_tag"] = cluster_template.labels["kube_tag"]
heat_params["kube_version"] = cluster_template.labels["kube_tag"]
heat_params["master_kube_tag"] = cluster_template.labels["kube_tag"]
heat_params["minion_kube_tag"] = cluster_template.labels["kube_tag"]
heat_params["update_max_batch_size"] = max_batch_size
# Rules: 1. No downgrade 2. Explicitly override 3. Merging based on set
# Update heat_params based on the data generated above
del heat_params['kube_service_account_private_key']
del heat_params['kube_service_account_key']
for label in new_addons:
heat_params[label] = cluster_template.labels[label]
cluster['cluster_template_id'] = cluster_template.uuid
new_labels = cluster.labels.copy()
new_labels.update(cluster_template.labels)
cluster['labels'] = new_labels
fields = {
'existing': True,
'parameters': heat_params,
'disable_rollback': not rollback
}
osc.heat().stacks.update(cluster.stack_id, **fields)
def get_nodegroup_extra_params(self, cluster, osc):
network = osc.heat().resources.get(cluster.stack_id, 'network')
secgroup = osc.heat().resources.get(cluster.stack_id,
'secgroup_kube_minion')
for output in osc.heat().stacks.get(cluster.stack_id).outputs:
if output['output_key'] == 'api_address':
api_address = output['output_value']
break
extra_params = {
'existing_master_private_ip': api_address,
'existing_security_group': secgroup.attributes['id'],
'fixed_network': network.attributes['fixed_network'],
'fixed_subnet': network.attributes['fixed_subnet'],
}
return extra_params
class HeatPoller(object):
def __init__(self, openstack_client, context, cluster, cluster_driver):


@ -13,17 +13,14 @@
# under the License.
from oslo_log import log as logging
from pbr.version import SemanticVersion as SV
from magnum.common import clients
from magnum.common import exception
from magnum.drivers.heat import driver
from magnum.drivers.k8s_fedora_atomic_v1 import template_def
LOG = logging.getLogger(__name__)
class Driver(driver.KubernetesDriver):
class Driver(driver.FedoraKubernetesDriver):
@property
def provides(self):
@ -35,79 +32,3 @@ class Driver(driver.KubernetesDriver):
def get_template_definition(self):
return template_def.AtomicK8sTemplateDefinition()
def upgrade_cluster(self, context, cluster, cluster_template,
max_batch_size, nodegroup, scale_manager=None,
rollback=False):
osc = clients.OpenStackClients(context)
_, heat_params, _ = (
self._extract_template_definition(context, cluster,
scale_manager=scale_manager))
# Extract labels/tags from cluster not template
# There are some version tags are not decalared in labels explicitly,
# so we need to get them from heat_params based on the labels given in
# new cluster template.
current_addons = {}
new_addons = {}
for label in cluster_template.labels:
# This is upgrade API, so we don't introduce new stuff by this API,
# but just focus on the version change.
new_addons[label] = cluster_template.labels[label]
if ((label.endswith('_tag') or
label.endswith('_version')) and label in heat_params):
current_addons[label] = heat_params[label]
try:
if (SV.from_pip_string(new_addons[label]) <
SV.from_pip_string(current_addons[label])):
raise exception.InvalidVersion(tag=label)
except Exception as e:
# NOTE(flwang): Different cloud providers may use different
# tag/version format which maybe not able to parse by
# SemanticVersion. For this case, let's just skip it.
LOG.debug("Failed to parse tag/version %s", str(e))
heat_params["master_image"] = cluster_template.image_id
heat_params["minion_image"] = cluster_template.image_id
# NOTE(flwang): Overwrite the kube_tag as well to avoid a server
# rebuild then do the k8s upgrade again, when both image id and
# kube_tag changed
heat_params["kube_tag"] = cluster_template.labels["kube_tag"]
heat_params["kube_version"] = cluster_template.labels["kube_tag"]
heat_params["master_kube_tag"] = cluster_template.labels["kube_tag"]
heat_params["minion_kube_tag"] = cluster_template.labels["kube_tag"]
heat_params["update_max_batch_size"] = max_batch_size
# Rules: 1. No downgrade 2. Explicitly override 3. Merging based on set
# Update heat_params based on the data generated above
del heat_params['kube_service_account_private_key']
del heat_params['kube_service_account_key']
for label in new_addons:
heat_params[label] = cluster_template.labels[label]
cluster['cluster_template_id'] = cluster_template.uuid
new_labels = cluster.labels.copy()
new_labels.update(cluster_template.labels)
cluster['labels'] = new_labels
fields = {
'existing': True,
'parameters': heat_params,
'disable_rollback': not rollback
}
osc.heat().stacks.update(cluster.stack_id, **fields)
def get_nodegroup_extra_params(self, cluster, osc):
network = osc.heat().resources.get(cluster.stack_id, 'network')
secgroup = osc.heat().resources.get(cluster.stack_id,
'secgroup_kube_minion')
for output in osc.heat().stacks.get(cluster.stack_id).outputs:
if output['output_key'] == 'api_address':
api_address = output['output_value']
break
extra_params = {
'existing_master_private_ip': api_address,
'existing_security_group': secgroup.attributes['id'],
'fixed_network': network.attributes['fixed_network'],
'fixed_subnet': network.attributes['fixed_subnet'],
}
return extra_params


@ -0,0 +1,32 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from magnum.drivers.heat import driver
from magnum.drivers.k8s_fedora_coreos_v1 import template_def
LOG = logging.getLogger(__name__)
class Driver(driver.FedoraKubernetesDriver):
@property
def provides(self):
return [
{'server_type': 'vm',
'os': 'fedora-coreos',
'coe': 'kubernetes'},
]
def get_template_definition(self):
return template_def.FCOSK8sTemplateDefinition()


@ -0,0 +1,45 @@
# Copyright 2016 Rackspace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import six.moves.urllib.parse as urlparse
from magnum.common import utils
import magnum.conf
from magnum.drivers.heat import k8s_fedora_template_def as kftd
CONF = magnum.conf.CONF
class FCOSK8sTemplateDefinition(kftd.K8sFedoraTemplateDefinition):
"""Kubernetes template for a Fedora Atomic VM."""
@property
def driver_module_path(self):
return __name__[:__name__.rindex('.')]
@property
def template_path(self):
return os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates/kubecluster.yaml')
def get_params(self, context, cluster_template, cluster, **kwargs):
extra_params = super(FCOSK8sTemplateDefinition,
self).get_params(context,
cluster_template,
cluster,
**kwargs)
extra_params['openstack_ca'] = urlparse.quote(
utils.get_openstack_ca())
return extra_params


@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large


@ -0,0 +1,853 @@
heat_template_version: queens
description: >
This is a nested stack that defines a single Kubernetes master. This stack is
included by a ResourceGroup resource in the parent template
(kubecluster.yaml).
parameters:
name:
type: string
description: server name
server_image:
type: string
description: glance image used to boot the server
master_flavor:
type: string
description: flavor to use when booting the server
ssh_key_name:
type: string
description: name of ssh key to be provisioned on our server
ssh_public_key:
type: string
description: The public ssh key to add in all nodes
external_network:
type: string
description: uuid of a network to use for floating ip addresses
portal_network_cidr:
type: string
description: >
address range used by kubernetes for service portals
kube_allow_priv:
type: string
description: >
whether or not kubernetes should permit privileged containers.
constraints:
- allowed_values: ["true", "false"]
boot_volume_size:
type: number
description: >
size of the cinder boot volume for nodes root volume
default: 0
boot_volume_type:
type: string
description: >
type of the cinder boot volume for nodes root volume
etcd_volume_size:
type: number
description: >
size of a cinder volume to allocate for etcd storage
etcd_volume_type:
type: string
description: >
type of a cinder volume to allocate for etcd storage
docker_volume_size:
type: number
description: >
size of a cinder volume to allocate to docker for container/image
storage
docker_volume_type:
type: string
description: >
type of a cinder volume to allocate to docker for container/image
storage
docker_storage_driver:
type: string
description: docker storage driver name
default: "devicemapper"
cgroup_driver:
type: string
description: >
cgroup driver name that kubelet should use, ideally the same as
the docker cgroup driver.
default: "cgroupfs"
volume_driver:
type: string
description: volume driver to use for container storage
region_name:
type: string
description: A logically separate section of the cluster
flannel_network_cidr:
type: string
description: network range for flannel overlay network
flannel_network_subnetlen:
type: number
description: size of subnet assigned to each master
flannel_backend:
type: string
description: >
specify the backend for flannel, default udp backend
constraints:
- allowed_values: ["udp", "vxlan", "host-gw"]
system_pods_initial_delay:
type: number
description: >
health check, time to wait for system pods (podmaster, scheduler) to boot
(in seconds)
default: 30
system_pods_timeout:
type: number
description: >
health check, timeout for system pods (podmaster, scheduler) to answer.
(in seconds)
default: 5
admission_control_list:
type: string
description: >
List of admission control plugins to activate
discovery_url:
type: string
description: >
Discovery URL used for bootstrapping the etcd cluster.
tls_disabled:
type: boolean
description: whether or not to enable TLS
traefik_ingress_controller_tag:
type: string
description: tag of the traefik containers to be used.
kube_dashboard_enabled:
type: boolean
description: whether or not to enable kubernetes dashboard
influx_grafana_dashboard_enabled:
type: boolean
description: Enable influxdb with grafana dashboard for data from heapster
verify_ca:
type: boolean
description: whether or not to validate certificate authority
kubernetes_port:
type: number
description: >
The port which is used by kube-apiserver to provide the Kubernetes
service.
cluster_uuid:
type: string
description: identifier for the cluster this template is generating
magnum_url:
type: string
description: endpoint to retrieve TLS certs from
prometheus_monitoring:
type: boolean
description: >
whether or not to have prometheus and grafana deployed
grafana_admin_passwd:
type: string
hidden: true
description: >
admin user password for the Grafana monitoring interface
api_public_address:
type: string
description: Public IP address of the Kubernetes master server.
default: ""
api_private_address:
type: string
description: Private IP address of the Kubernetes master server.
default: ""
fixed_network:
type: string
description: Network from which to allocate fixed addresses.
fixed_network_name:
type: string
description: Network from which to allocate fixed addresses.
fixed_subnet:
type: string
description: Subnet from which to allocate fixed addresses.
network_driver:
type: string
description: network driver to use for instantiating container networks
secgroup_kube_master_id:
type: string
description: ID of the security group for kubernetes master.
api_pool_id:
type: string
description: ID of the load balancer pool of k8s API server.
etcd_pool_id:
type: string
description: ID of the load balancer pool of etcd server.
auth_url:
type: string
description: >
url for kubernetes to authenticate
username:
type: string
description: >
user account
password:
type: string
description: >
user password
http_proxy:
type: string
description: http proxy address for docker
https_proxy:
type: string
description: https proxy address for docker
no_proxy:
type: string
description: no proxies for docker
kube_tag:
type: string
description: tag of the k8s containers used to provision the kubernetes cluster
cloud_provider_tag:
type: string
description:
tag of the kubernetes/cloud-provider-openstack
https://hub.docker.com/r/k8scloudprovider/openstack-cloud-controller-manager/tags/
cloud_provider_enabled:
type: boolean
description: Enable or disable the openstack kubernetes cloud provider
etcd_tag:
type: string
description: tag of the etcd system container
coredns_tag:
type: string
description: tag of the coredns container
flannel_tag:
type: string
description: tag of the flannel system containers
flannel_cni_tag:
type: string
description: tag of the flannel cni container
kube_version:
type: string
description: version of kubernetes used for kubernetes cluster
kube_dashboard_version:
type: string
description: version of kubernetes dashboard used for kubernetes cluster
trustee_user_id:
type: string
description: user id of the trustee
trustee_password:
type: string
description: password of the trustee
hidden: true
trust_id:
type: string
description: id of the trust which is used by the trustee
hidden: true
insecure_registry_url:
type: string
description: insecure registry url
container_infra_prefix:
type: string
description: >
prefix of container images used in the cluster, kubernetes components,
kubernetes-dashboard, coredns etc
etcd_lb_vip:
type: string
description: >
etcd lb vip private used to generate certs on master.
default: ""
dns_service_ip:
type: string
description: >
address used by Kubernetes DNS service
dns_cluster_domain:
type: string
description: >
domain name for cluster DNS
openstack_ca:
type: string
description: The OpenStack CA certificate to install on the node.
nodes_server_group_id:
type: string
description: ID of the server group for kubernetes cluster nodes.
availability_zone:
type: string
description: >
availability zone for master and nodes
default: ""
ca_key:
type: string
description: key of internal ca for the kube certificate api manager
hidden: true
cert_manager_api:
type: boolean
description: true if the kubernetes cert api manager should be enabled
default: false
calico_tag:
type: string
description: tag of the calico containers used to provision the calico node
calico_kube_controllers_tag:
type: string
description: tag of the kube_controllers used to provision the calico node
calico_ipv4pool:
type: string
description: Configure the IP pool from which Pod IPs will be chosen
pods_network_cidr:
type: string
description: Configure the IP pool/range from which pod IPs will be chosen
ingress_controller:
type: string
description: >
ingress controller backend to use
ingress_controller_role:
type: string
description: >
node role where the ingress controller should run
octavia_ingress_controller_tag:
type: string
description: Octavia ingress controller docker image tag.
kubelet_options:
type: string
description: >
additional options to be passed to the kubelet
kubeapi_options:
type: string
description: >
additional options to be passed to the api
kubecontroller_options:
type: string
description: >
additional options to be passed to the controller manager
kubeproxy_options:
type: string
description: >
additional options to be passed to the kube proxy
kubescheduler_options:
type: string
description: >
additional options to be passed to the scheduler
octavia_enabled:
type: boolean
description: >
whether or not to use Octavia for LoadBalancer type service.
default: False
kube_service_account_key:
type: string
hidden: true
description: >
The signed cert will be used to verify the k8s service account tokens
during authentication.
kube_service_account_private_key:
type: string
hidden: true
description: >
The private key will be used to sign generated k8s service account
tokens.
prometheus_tag:
type: string
description: tag of prometheus container
grafana_tag:
type: string
description: tag of grafana container
heat_container_agent_tag:
type: string
description: tag of the heat_container_agent system container
keystone_auth_enabled:
type: boolean
description: >
true if the keystone authN and authZ should be enabled
default:
false
k8s_keystone_auth_tag:
type: string
description: tag of the k8s_keystone_auth container
monitoring_enabled:
type: boolean
description: Enable or disable prometheus-operator monitoring solution.
default: false
prometheus_operator_chart_tag:
type: string
description: The stable/prometheus-operator chart version to use.
default: 5.12.3
project_id:
type: string
description: >
project id of current project
tiller_enabled:
type: string
description: Whether to enable tiller or not
tiller_tag:
type: string
description: tag of tiller container
tiller_namespace:
type: string
description: namespace where tiller will be installed
auto_healing_enabled:
type: boolean
description: >
true if the auto healing feature should be enabled
auto_healing_controller:
type: string
description: >
The service to be deployed for auto-healing.
default: "draino"
magnum_auto_healer_tag:
type: string
description: tag of the magnum-auto-healer service.
default: "v1.15.0"
auto_scaling_enabled:
type: boolean
description: >
true if the auto scaling feature should be enabled
node_problem_detector_tag:
type: string
description: tag of the node problem detector container
nginx_ingress_controller_tag:
type: string
description: nginx ingress controller docker image tag
draino_tag:
type: string
description: tag of the draino container
autoscaler_tag:
type: string
description: tag of the autoscaler container
min_node_count:
type: number
description: >
minimum node count of cluster workers when doing scale down
max_node_count:
type: number
description: >
maximum node count of cluster workers when doing scale up
npd_enabled:
type: boolean
description: >
true if the npd service should be launched
default:
true
conditions:
image_based: {equals: [{get_param: boot_volume_size}, 0]}
volume_based:
not:
equals:
- get_param: boot_volume_size
- 0
resources:
######################################################################
#
# resource that exposes the IPs of either the kube master or the API
# LBaaS pool depending on whether LBaaS is enabled for the cluster.
#
api_address_switch:
type: Magnum::ApiGatewaySwitcher
properties:
pool_public_ip: {get_param: api_public_address}
pool_private_ip: {get_param: api_private_address}
master_public_ip: {get_attr: [kube_master_floating, floating_ip_address]}
master_private_ip: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
######################################################################
#
# software configs. these are components that are combined into
# a multipart MIME user-data archive.
#
agent_config:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
list_join:
- "\n"
-
- str_replace:
template: {get_file: user_data.json}
params:
$HOSTNAME: {get_param: name}
$SSH_KEY_VALUE: {get_param: ssh_public_key}
$OPENSTACK_CA: {get_param: openstack_ca}
master_config:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
list_join:
- "\n"
-
- str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.sh}
params:
"$INSTANCE_NAME": {get_param: name}
"$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring}
"$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]}
"$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]}
"$KUBE_API_PORT": {get_param: kubernetes_port}
"$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]}
"$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
"$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv}
"$ETCD_VOLUME": {get_resource: etcd_volume}
"$ETCD_VOLUME_SIZE": {get_param: etcd_volume_size}
"$DOCKER_VOLUME": {get_resource: docker_volume}
"$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size}
"$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver}
"$CGROUP_DRIVER": {get_param: cgroup_driver}
"$NETWORK_DRIVER": {get_param: network_driver}
"$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr}
"$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen}
"$FLANNEL_BACKEND": {get_param: flannel_backend}
"$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay}
"$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout}
"$PODS_NETWORK_CIDR": {get_param: pods_network_cidr}
"$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr}
"$ADMISSION_CONTROL_LIST": {get_param: admission_control_list}
"$ETCD_DISCOVERY_URL": {get_param: discovery_url}
"$AUTH_URL": {get_param: auth_url}
"$USERNAME": {get_param: username}
"$PASSWORD": {get_param: password}
"$CLUSTER_NETWORK": {get_param: fixed_network}
"$CLUSTER_NETWORK_NAME": {get_param: fixed_network_name}
"$CLUSTER_SUBNET": {get_param: fixed_subnet}
"$TLS_DISABLED": {get_param: tls_disabled}
"$TRAEFIK_INGRESS_CONTROLLER_TAG": {get_param: traefik_ingress_controller_tag}
"$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled}
"$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: influx_grafana_dashboard_enabled}
"$VERIFY_CA": {get_param: verify_ca}
"$CLUSTER_UUID": {get_param: cluster_uuid}
"$MAGNUM_URL": {get_param: magnum_url}
"$VOLUME_DRIVER": {get_param: volume_driver}
"$REGION_NAME": {get_param: region_name}
"$HTTP_PROXY": {get_param: http_proxy}
"$HTTPS_PROXY": {get_param: https_proxy}
"$NO_PROXY": {get_param: no_proxy}
"$KUBE_TAG": {get_param: kube_tag}
"$CLOUD_PROVIDER_TAG": {get_param: cloud_provider_tag}
"$CLOUD_PROVIDER_ENABLED": {get_param: cloud_provider_enabled}
"$ETCD_TAG": {get_param: etcd_tag}
"$COREDNS_TAG": {get_param: coredns_tag}
"$FLANNEL_TAG": {get_param: flannel_tag}
"$FLANNEL_CNI_TAG": {get_param: flannel_cni_tag}
"$KUBE_VERSION": {get_param: kube_version}
"$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version}
"$TRUSTEE_USER_ID": {get_param: trustee_user_id}
"$TRUSTEE_PASSWORD": {get_param: trustee_password}
"$TRUST_ID": {get_param: trust_id}
"$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url}
"$CONTAINER_INFRA_PREFIX": {get_param: container_infra_prefix}
"$ETCD_LB_VIP": {get_param: etcd_lb_vip}
"$DNS_SERVICE_IP": {get_param: dns_service_ip}
"$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain}
"$CERT_MANAGER_API": {get_param: cert_manager_api}
"$CA_KEY": {get_param: ca_key}
"$CALICO_TAG": {get_param: calico_tag}
"$CALICO_KUBE_CONTROLLERS_TAG": {get_param: calico_kube_controllers_tag}
"$CALICO_IPV4POOL": {get_param: calico_ipv4pool}
"$INGRESS_CONTROLLER": {get_param: ingress_controller}
"$INGRESS_CONTROLLER_ROLE": {get_param: ingress_controller_role}
"$OCTAVIA_INGRESS_CONTROLLER_TAG": {get_param: octavia_ingress_controller_tag}
"$KUBELET_OPTIONS": {get_param: kubelet_options}
"$KUBEAPI_OPTIONS": {get_param: kubeapi_options}
"$KUBECONTROLLER_OPTIONS": {get_param: kubecontroller_options}
"$KUBEPROXY_OPTIONS": {get_param: kubeproxy_options}
"$KUBESCHEDULER_OPTIONS": {get_param: kubescheduler_options}
"$OCTAVIA_ENABLED": {get_param: octavia_enabled}
"$KUBE_SERVICE_ACCOUNT_KEY": {get_param: kube_service_account_key}
"$KUBE_SERVICE_ACCOUNT_PRIVATE_KEY": {get_param: kube_service_account_private_key}
"$PROMETHEUS_TAG": {get_param: prometheus_tag}
"$GRAFANA_TAG": {get_param: grafana_tag}
"$HEAT_CONTAINER_AGENT_TAG": {get_param: heat_container_agent_tag}
"$KEYSTONE_AUTH_ENABLED": {get_param: keystone_auth_enabled}
"$K8S_KEYSTONE_AUTH_TAG": {get_param: k8s_keystone_auth_tag}
"$MONITORING_ENABLED": {get_param: monitoring_enabled}
"$PROMETHEUS_OPERATOR_CHART_TAG": {get_param: prometheus_operator_chart_tag}
"$PROJECT_ID": {get_param: project_id}
"$EXTERNAL_NETWORK_ID": {get_param: external_network}
"$TILLER_ENABLED": {get_param: tiller_enabled}
"$TILLER_TAG": {get_param: tiller_tag}
"$TILLER_NAMESPACE": {get_param: tiller_namespace}
"$NODE_PROBLEM_DETECTOR_TAG": {get_param: node_problem_detector_tag}
"$NGINX_INGRESS_CONTROLLER_TAG": {get_param: nginx_ingress_controller_tag}
"$AUTO_HEALING_ENABLED": {get_param: auto_healing_enabled}
"$AUTO_HEALING_CONTROLLER": {get_param: auto_healing_controller}
"$MAGNUM_AUTO_HEALER_TAG": {get_param: magnum_auto_healer_tag}
"$AUTO_SCALING_ENABLED": {get_param: auto_scaling_enabled}
"$DRAINO_TAG": {get_param: draino_tag}
"$AUTOSCALER_TAG": {get_param: autoscaler_tag}
"$MIN_NODE_COUNT": {get_param: min_node_count}
"$MAX_NODE_COUNT": {get_param: max_node_count}
"$NPD_ENABLED": {get_param: npd_enabled}
- get_file: ../../common/templates/kubernetes/fragments/make-cert.sh
- get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh
- get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh
- get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-master.sh
# TODO add docker_storage_setup
- get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh
- get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh
master_config_deployment:
type: OS::Heat::SoftwareDeployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: master_config}
server: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]}
actions: ['CREATE']
######################################################################
#
# a single kubernetes master.
#
kube_node_volume:
type: OS::Cinder::Volume
condition: volume_based
properties:
image: {get_param: server_image}
size: {get_param: boot_volume_size}
volume_type: {get_param: boot_volume_type}
# do NOT use "_" (underscore) in the Nova server name
# it creates a mismatch between the generated Nova name and its hostname
# which can lead to weird problems
kube-master:
type: OS::Nova::Server
condition: image_based
properties:
name: {get_param: name}
image: {get_param: server_image}
flavor: {get_param: master_flavor}
user_data_format: SOFTWARE_CONFIG
software_config_transport: POLL_SERVER_HEAT
user_data: {get_resource: agent_config}
networks:
- port: {get_resource: kube_master_eth0}
scheduler_hints: { group: { get_param: nodes_server_group_id }}
availability_zone: {get_param: availability_zone}
kube-master-bfv:
type: OS::Nova::Server
condition: volume_based
properties:
name: {get_param: name}
flavor: {get_param: master_flavor}
user_data_format: SOFTWARE_CONFIG
software_config_transport: POLL_SERVER_HEAT
user_data: {get_resource: agent_config}
networks:
- port: {get_resource: kube_master_eth0}
scheduler_hints: { group: { get_param: nodes_server_group_id }}
availability_zone: {get_param: availability_zone}
block_device_mapping_v2:
- boot_index: 0
volume_id: {get_resource: kube_node_volume}
delete_on_termination: true
kube_master_eth0:
type: OS::Neutron::Port
properties:
network: {get_param: fixed_network}
security_groups:
- {get_param: secgroup_kube_master_id}
fixed_ips:
- subnet: {get_param: fixed_subnet}
allowed_address_pairs:
- ip_address: {get_param: pods_network_cidr}
replacement_policy: AUTO
kube_master_floating:
type: Magnum::Optional::KubeMaster::Neutron::FloatingIP
properties:
floating_network: {get_param: external_network}
port_id: {get_resource: kube_master_eth0}
depends_on: kube-master
api_pool_member:
type: Magnum::Optional::Neutron::LBaaS::PoolMember
properties:
pool: {get_param: api_pool_id}
address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
subnet: { get_param: fixed_subnet }
protocol_port: {get_param: kubernetes_port}
etcd_pool_member:
type: Magnum::Optional::Neutron::LBaaS::PoolMember
properties:
pool: {get_param: etcd_pool_id}
address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
subnet: { get_param: fixed_subnet }
protocol_port: 2379
######################################################################
#
# etcd storage. This allocates a cinder volume and attaches it
# to the master.
#
etcd_volume:
type: Magnum::Optional::Etcd::Volume
properties:
size: {get_param: etcd_volume_size}
volume_type: {get_param: etcd_volume_type}
etcd_volume_attach:
type: Magnum::Optional::Etcd::VolumeAttachment
properties:
instance_uuid: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]}
volume_id: {get_resource: etcd_volume}
mountpoint: /dev/vdc
######################################################################
#
# docker storage. This allocates a cinder volume and attaches it
# to the minion.
#
docker_volume:
type: Magnum::Optional::Cinder::Volume
properties:
size: {get_param: docker_volume_size}
volume_type: {get_param: docker_volume_type}
docker_volume_attach:
type: Magnum::Optional::Cinder::VolumeAttachment
properties:
instance_uuid: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]}
volume_id: {get_resource: docker_volume}
mountpoint: /dev/vdb
upgrade_kubernetes:
type: OS::Heat::SoftwareConfig
properties:
group: script
inputs:
- name: kube_tag_input
config:
get_file: ../../common/templates/kubernetes/fragments/upgrade-kubernetes.sh
upgrade_kubernetes_deployment:
type: OS::Heat::SoftwareDeployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: upgrade_kubernetes}
server: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]}
actions: ['UPDATE']
input_values:
kube_tag_input: {get_param: kube_tag}
outputs:
OS::stack_id:
value: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]}
kube_master_ip:
value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
description: >
This is the "private" IP address of the Kubernetes master node.
kube_master_external_ip:
value: {get_attr: [kube_master_floating, floating_ip_address]}
description: >
This is the "public" IP address of the Kubernetes master node.


@ -0,0 +1,553 @@
heat_template_version: queens
description: >
This is a nested stack that defines a single Kubernetes minion. This stack is
included by an AutoScalingGroup resource in the parent template
(kubecluster.yaml).
parameters:
name:
type: string
description: server name
server_image:
type: string
description: glance image used to boot the server
minion_flavor:
type: string
description: flavor to use when booting the server
ssh_key_name:
type: string
description: name of ssh key to be provisioned on our server
ssh_public_key:
type: string
description: The public ssh key to add in all nodes
external_network:
type: string
description: uuid/name of a network to use for floating ip addresses
kube_allow_priv:
type: string
description: >
whether or not kubernetes should permit privileged containers.
constraints:
- allowed_values: ["true", "false"]
boot_volume_size:
type: number
description: >
size of the cinder boot volume
boot_volume_type:
type: string
description: >
type of the cinder boot volume
docker_volume_size:
type: number
description: >
size of a cinder volume to allocate to docker for container/image
storage
docker_volume_type:
type: string
description: >
type of a cinder volume to allocate to docker for container/image
storage
docker_storage_driver:
type: string
description: docker storage driver name
default: "devicemapper"
cgroup_driver:
type: string
description: >
cgroup driver name that kubelet should use, ideally the same as
the docker cgroup driver.
default: "cgroupfs"
tls_disabled:
type: boolean
description: whether or not to enable TLS
verify_ca:
type: boolean
description: whether or not to validate certificate authority
kubernetes_port:
type: number
description: >
The port which is used by kube-apiserver to provide the Kubernetes
service.
cluster_uuid:
type: string
description: identifier for the cluster this template is generating
magnum_url:
type: string
description: endpoint to retrieve TLS certs from
prometheus_monitoring:
type: boolean
description: >
whether or not to have the node-exporter running on the node
kube_master_ip:
type: string
description: IP address of the Kubernetes master server.
etcd_server_ip:
type: string
description: IP address of the Etcd server.
fixed_network:
type: string
description: Network from which to allocate fixed addresses.
fixed_subnet:
type: string
description: Subnet from which to allocate fixed addresses.
network_driver:
type: string
description: network driver to use for instantiating container networks
flannel_network_cidr:
type: string
description: network range for flannel overlay network
wait_condition_timeout:
type: number
description: >
timeout for the Wait Conditions
registry_enabled:
type: boolean
description: >
Indicates whether the docker registry is enabled.
registry_port:
type: number
description: port of registry service
swift_region:
type: string
description: region of swift service
registry_container:
type: string
description: >
name of swift container which docker registry stores images in
registry_insecure:
type: boolean
description: >
indicates whether to skip TLS verification between registry and backend storage
registry_chunksize:
type: number
description: >
size of the data segments for the swift dynamic large objects
secgroup_kube_minion_id:
type: string
description: ID of the security group for kubernetes minion.
volume_driver:
type: string
description: volume driver to use for container storage
region_name:
type: string
description: A logically separate section of the cluster
username:
type: string
description: >
user account
password:
type: string
description: >
user password, not set in current implementation, only used to
fill in for Kubernetes config file
hidden: true
http_proxy:
type: string
description: http proxy address for docker
https_proxy:
type: string
description: https proxy address for docker
no_proxy:
type: string
description: no proxies for docker
kube_tag:
type: string
description: tag of the k8s containers used to provision the kubernetes cluster
kube_version:
type: string
description: version of kubernetes used for kubernetes cluster
trustee_domain_id:
type: string
description: domain id of the trustee
trustee_user_id:
type: string
description: user id of the trustee
trustee_username:
type: string
description: username of the trustee
trustee_password:
type: string
description: password of the trustee
hidden: true
trust_id:
type: string
description: id of the trust which is used by the trustee
hidden: true
auth_url:
type: string
description: >
url for keystone; must be v2 since the k8s backend only supports v2
at this point
insecure_registry_url:
type: string
description: insecure registry url
container_infra_prefix:
type: string
description: >
prefix of container images used in the cluster, kubernetes components,
kubernetes-dashboard, coredns etc
dns_service_ip:
type: string
description: >
address used by Kubernetes DNS service
dns_cluster_domain:
type: string
description: >
domain name for cluster DNS
openstack_ca:
type: string
description: The OpenStack CA certificate to install on the node.
nodes_server_group_id:
type: string
description: ID of the server group for kubernetes cluster nodes.
availability_zone:
type: string
description: >
availability zone for master and nodes
default: ""
pods_network_cidr:
type: string
description: Configure the IP pool/range from which pod IPs will be chosen
kubelet_options:
type: string
description: >
additional options to be passed to the kubelet
kubeproxy_options:
type: string
description: >
additional options to be passed to the kube proxy
octavia_enabled:
type: boolean
description: >
whether or not to use Octavia for LoadBalancer type service.
default: False
cloud_provider_enabled:
type: boolean
description: Enable or disable the openstack kubernetes cloud provider
heat_container_agent_tag:
type: string
description: tag of the heat_container_agent system container
auto_healing_enabled:
type: boolean
description: >
true if the auto healing feature should be enabled
auto_healing_controller:
type: string
description: >
The service to be deployed for auto-healing.
default: "draino"
npd_enabled:
type: boolean
description: >
true if the npd service should be launched
default:
true
conditions:
image_based: {equals: [{get_param: boot_volume_size}, 0]}
volume_based:
not:
equals:
- get_param: boot_volume_size
- 0
resources:
agent_config:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
list_join:
- "\n"
-
- str_replace:
template: {get_file: user_data.json}
params:
$HOSTNAME: {get_param: name}
$SSH_KEY_VALUE: {get_param: ssh_public_key}
$OPENSTACK_CA: {get_param: openstack_ca}
######################################################################
#
# software configs. these are components that are combined into
# a multipart MIME user-data archive.
#
node_config:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
list_join:
- "\n"
-
- str_replace:
template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params.sh}
params:
$INSTANCE_NAME: {get_param: name}
$PROMETHEUS_MONITORING: {get_param: prometheus_monitoring}
$KUBE_ALLOW_PRIV: {get_param: kube_allow_priv}
$KUBE_MASTER_IP: {get_param: kube_master_ip}
$KUBE_API_PORT: {get_param: kubernetes_port}
$KUBE_NODE_PUBLIC_IP: {get_attr: [kube_minion_floating, floating_ip_address]}
$KUBE_NODE_IP: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]}
$ETCD_SERVER_IP: {get_param: etcd_server_ip}
$DOCKER_VOLUME: {get_resource: docker_volume}
$DOCKER_VOLUME_SIZE: {get_param: docker_volume_size}
$DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver}
$CGROUP_DRIVER: {get_param: cgroup_driver}
$NETWORK_DRIVER: {get_param: network_driver}
$REGISTRY_ENABLED: {get_param: registry_enabled}
$REGISTRY_PORT: {get_param: registry_port}
$SWIFT_REGION: {get_param: swift_region}
$REGISTRY_CONTAINER: {get_param: registry_container}
$REGISTRY_INSECURE: {get_param: registry_insecure}
$REGISTRY_CHUNKSIZE: {get_param: registry_chunksize}
$TLS_DISABLED: {get_param: tls_disabled}
$VERIFY_CA: {get_param: verify_ca}
$CLUSTER_UUID: {get_param: cluster_uuid}
$MAGNUM_URL: {get_param: magnum_url}
$USERNAME: {get_param: username}
$PASSWORD: {get_param: password}
$VOLUME_DRIVER: {get_param: volume_driver}
$REGION_NAME: {get_param: region_name}
$HTTP_PROXY: {get_param: http_proxy}
$HTTPS_PROXY: {get_param: https_proxy}
$NO_PROXY: {get_param: no_proxy}
$KUBE_TAG: {get_param: kube_tag}
$FLANNEL_NETWORK_CIDR: {get_param: flannel_network_cidr}
$PODS_NETWORK_CIDR: {get_param: pods_network_cidr}
$KUBE_VERSION: {get_param: kube_version}
$TRUSTEE_USER_ID: {get_param: trustee_user_id}
$TRUSTEE_PASSWORD: {get_param: trustee_password}
$TRUST_ID: {get_param: trust_id}
$AUTH_URL: {get_param: auth_url}
$CLOUD_PROVIDER_ENABLED: {get_param: cloud_provider_enabled}
$INSECURE_REGISTRY_URL: {get_param: insecure_registry_url}
$CONTAINER_INFRA_PREFIX: {get_param: container_infra_prefix}
$DNS_SERVICE_IP: {get_param: dns_service_ip}
$DNS_CLUSTER_DOMAIN: {get_param: dns_cluster_domain}
$KUBELET_OPTIONS: {get_param: kubelet_options}
$KUBEPROXY_OPTIONS: {get_param: kubeproxy_options}
$OCTAVIA_ENABLED: {get_param: octavia_enabled}
$HEAT_CONTAINER_AGENT_TAG: {get_param: heat_container_agent_tag}
$AUTO_HEALING_ENABLED: {get_param: auto_healing_enabled}
$AUTO_HEALING_CONTROLLER: {get_param: auto_healing_controller}
$NPD_ENABLED: {get_param: npd_enabled}
- get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh
- get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh
- get_file: ../../common/templates/fragments/configure-docker-registry.sh
- get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-minion.sh
- get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh
# TODO add docker_storage_setup
- get_file: ../../common/templates/kubernetes/fragments/enable-services-minion.sh
- get_file: ../../common/templates/fragments/enable-docker-registry.sh
node_config_deployment:
type: OS::Heat::SoftwareDeployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: node_config}
server: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]}
actions: ['CREATE']
######################################################################
#
# a single kubernetes minion.
#
kube_node_volume:
type: OS::Cinder::Volume
condition: volume_based
properties:
image: {get_param: server_image}
size: {get_param: boot_volume_size}
volume_type: {get_param: boot_volume_type}
# do NOT use "_" (underscore) in the Nova server name
# it creates a mismatch between the generated Nova name and its hostname
# which can lead to weird problems
kube-minion:
condition: image_based
type: OS::Nova::Server
properties:
name: {get_param: name}
flavor: {get_param: minion_flavor}
image: {get_param: server_image}
user_data: {get_resource: agent_config}
user_data_format: SOFTWARE_CONFIG
software_config_transport: POLL_SERVER_HEAT
networks:
- port: {get_resource: kube_minion_eth0}
scheduler_hints: { group: { get_param: nodes_server_group_id }}
availability_zone: {get_param: availability_zone}
kube-minion-bfv:
condition: volume_based
type: OS::Nova::Server
properties:
name: {get_param: name}
flavor: {get_param: minion_flavor}
user_data: {get_resource: agent_config}
user_data_format: SOFTWARE_CONFIG
software_config_transport: POLL_SERVER_HEAT
networks:
- port: {get_resource: kube_minion_eth0}
scheduler_hints: { group: { get_param: nodes_server_group_id }}
availability_zone: {get_param: availability_zone}
block_device_mapping_v2:
- boot_index: 0
volume_id: {get_resource: kube_node_volume}
delete_on_termination: true
kube_minion_eth0:
type: OS::Neutron::Port
properties:
network: {get_param: fixed_network}
security_groups:
- get_param: secgroup_kube_minion_id
fixed_ips:
- subnet: {get_param: fixed_subnet}
allowed_address_pairs:
- ip_address: {get_param: pods_network_cidr}
replacement_policy: AUTO
kube_minion_floating:
type: Magnum::Optional::KubeMinion::Neutron::FloatingIP
properties:
floating_network: {get_param: external_network}
port_id: {get_resource: kube_minion_eth0}
depends_on: kube-minion
######################################################################
#
# docker storage. This allocates a cinder volume and attaches it
# to the minion.
#
docker_volume:
type: Magnum::Optional::Cinder::Volume
properties:
size: {get_param: docker_volume_size}
volume_type: {get_param: docker_volume_type}
docker_volume_attach:
type: Magnum::Optional::Cinder::VolumeAttachment
properties:
instance_uuid: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]}
volume_id: {get_resource: docker_volume}
mountpoint: /dev/vdb
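  # NOTE (assumption): the Magnum::Optional::Cinder::* types above are
  # mapped either to real Cinder resources or to OS::Heat::None through the
  # driver environment files, so they are no-ops unless a docker volume
  # size is requested; the docker_storage_setup TODO above suggests the
  # volume is not yet consumed on Fedora CoreOS.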
upgrade_kubernetes:
type: OS::Heat::SoftwareConfig
properties:
group: script
inputs:
- name: kube_tag_input
config:
get_file: ../../common/templates/kubernetes/fragments/upgrade-kubernetes.sh
upgrade_kubernetes_deployment:
type: OS::Heat::SoftwareDeployment
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: upgrade_kubernetes}
server: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]}
actions: ['UPDATE']
input_values:
kube_tag_input: {get_param: kube_tag}
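  # NOTE: with actions: ['UPDATE'] this deployment only fires on a stack
  # update; it re-runs upgrade-kubernetes.sh with the new kube_tag so the
  # node's kubernetes containers can be moved to the requested tag.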
outputs:
kube_minion_ip:
value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]}
    description: >
      This is the fixed (private) IP address of the Kubernetes minion node.
kube_minion_external_ip:
value: {get_attr: [kube_minion_floating, floating_ip_address]}
    description: >
      This is the floating ("public") IP address of the Kubernetes minion node.
######################################################################
#
  # NOTE(flwang): Returning the minion node server ID here so that a
  # consumer can ask Heat to remove a particular node with
  # removal_policies. Otherwise, the consumer (e.g. the AutoScaler) has to
  # remove nodes by index, which is confusing for consumers outside the
  # OpenStack world.
# https://storyboard.openstack.org/#!/story/2005054
#
######################################################################
OS::stack_id:
value: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]}
description: >
This is the Nova server id of the node.
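Building on the NOTE above, a consumer such as the cluster autoscaler can
target this server ID when shrinking a cluster. A minimal sketch with the
Magnum CLI (the cluster name "mycluster" and the target node count are
placeholders; Magnum turns this into a ResourceGroup update that uses
removal_policies under the hood):

    # Remove one specific node, identified by the Nova server ID exposed by
    # the OS::stack_id output above, while resizing the cluster to 2 nodes.
    openstack coe cluster resize --nodes-to-remove <nova-server-id> mycluster 2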

View File

@ -0,0 +1,84 @@
{
"ignition": {
"config": {
"replace": {
"source": null,
"verification": {}
}
},
"security": {
"tls": {}
},
"timeouts": {},
"version": "3.0.0"
},
"passwd": {
"users": [
{
"name": "core",
"sshAuthorizedKeys": [
"$SSH_KEY_VALUE"
]
}
]
},
"storage": {
"directories":[
{
"path": "/var/lib/cloud/data",
"group": {"name": "root"},
"user": {"name": "root"},
"mode": 644
},
{
"path": "/var/lib/heat-cfntools",
"group": {"name": "root"},
"user": {"name": "root"},
"mode": 644
}
],
"files": [
{
"filesystem": "root",
"path": "/etc/hostname",
"mode": 420,
"contents": { "source": "data:,$HOSTNAME" }
},
{
"filesystem": "root",
"group": {"name": "root"},
"path": "/etc/pki/ca-trust/source/anchors/openstack-ca.pem",
"user": {"name": "root"},
"contents": {
"source": "data:,$OPENSTACK_CA",
"verification": {}
},
"mode": 644
},
{
"user": {"name": "root"},
"group": {"name": "root"},
"mode": 700,
"path": "/root/configure-agent-env.sh",
"contents": {
"source": "data:,%23!%2Fbin%2Fbash%0A%0Aset%20-x%0Aset%20-e%0Aset%20%2Bu%0A%0Auntil%20%5B%20-f%20%2Fetc%2Fpki%2Fca-trust%2Fsource%2Fanchors%2Fopenstack-ca.pem%20%5D%0Ado%0A%20%20%20%20echo%20%22waiting%20for%20%2Fetc%2Fpki%2Fca-trust%2Fsource%2Fanchors%2Fopenstack-ca.pem%22%0A%20%20%20%20sleep%203s%0Adone%0A%2Fusr%2Fbin%2Fupdate-ca-trust%0A%0AHTTP_PROXY%3D%22%24HTTP_PROXY%22%0AHTTPS_PROXY%3D%22%24HTTPS_PROXY%22%0ANO_PROXY%3D%22%24NO_PROXY%22%0ACONTAINER_INFRA_PREFIX%3D%22%24CONTAINER_INFRA_PREFIX%22%0AHEAT_CONTAINER_AGENT_TAG%3D%22%24HEAT_CONTAINER_AGENT_TAG%22%0A%0A%0Aif%20%5B%20-n%20%22%24%7BHTTP_PROXY%7D%22%20%5D%3B%20then%0A%20%20%20%20export%20HTTP_PROXY%0Afi%0A%0Aif%20%5B%20-n%20%22%24%7BHTTPS_PROXY%7D%22%20%5D%3B%20then%0A%20%20%20%20export%20HTTPS_PROXY%0Afi%0A%0Aif%20%5B%20-n%20%22%24%7BNO_PROXY%7D%22%20%5D%3B%20then%0A%20%20%20%20export%20NO_PROXY%0Afi%0A%0A%23%20Create%20a%20keypair%20for%20the%20heat-container-agent%20to%0A%23%20access%20the%20node%20over%20ssh.%20It%20is%20useful%20to%20operate%0A%23%20in%20host%20mount%20namespace%20and%20apply%20configuration.%0Aid%0Amkdir%20-p%20%2Fsrv%2Fmagnum%2F.ssh%0Achmod%200700%20%2Fsrv%2Fmagnum%2F.ssh%0A%23touch%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0Assh-keygen%20-q%20-t%20rsa%20-N%20''%20-f%20%2Ftmp%2Fheat_agent_rsa%0Amv%20%2Ftmp%2Fheat_agent_rsa%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0Amv%20%2Ftmp%2Fheat_agent_rsa.pub%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa.pub%0Achmod%200400%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0Achmod%200400%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa.pub%0A%23%20Add%20the%20public%20to%20the%20host%20authorized_keys%20file.%0Amkdir%20-p%20%2Froot%2F.ssh%0Achmod%200700%20%2Froot%2F.ssh%0Acat%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa.pub%20%3E%20%2Froot%2F.ssh%2Fauthorized_keys%0A%23%20Add%20localost%20to%20know_hosts%0Assh-keyscan%20127.0.0.1%20%3E%20%2Fsrv%2Fmagnum%2F.ssh%2Fknown_hosts%0A%23%20ssh%20configguration%20file%2C%20to%20be%20specified%20with%20ssh%20-F%0Acat%20%3E%20%2Fsrv%2Fmagnum%2F.ssh%2Fconfig%20%3C%3CEOF%0AHost%20localhost%0A%20%20%20%20%20HostName%20127.0.0.1%0A%20%20%20%20%20User%20root%0A%20%20%20%20%20IdentityFile%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0A%20%20%20%20%20UserKnownHostsFile%20%2Fsrv%2Fmagnum%2F.ssh%2Fknown_hosts%0AEOF%0A%0Ased%20-i%20'%2F%5EPermitRootLogin%2F%20s%2F%20.*%2F%20without-password%2F'%20%2Fetc%2Fssh%2Fsshd_config%0A%23%20Security%20enhancement%3A%20Disable%20password%20authentication%0Ased%20-i%20'%2F%5EPasswordAuthentication%20yes%2F%20s%2F%20yes%2F%20no%2F'%20%2Fetc%2Fssh%2Fsshd_config%0A%0Asystemctl%20restart%20sshd%0A",
"verification": {}
}
}
]
},
"systemd": {
"units": [
{
"name": "configure-agent-env.service",
"enabled": true,
"contents": "[Unit]\nDescription=Configure heat agent environment\nAfter=sshd.service\n\n[Service]\nUser=root\nGroup=root\nType=simple\nExecStart=/bin/bash /root/configure-agent-env.sh\nRestart=on-failure\n\n[Install]\nWantedBy=multi-user.target"
},
{
"name": "heat-container-agent.service",
"enabled": true,
"contents": "[Unit]\nDescription=Run heat-container-agent\nAfter=network-online.target configure-agent-env.service\nWants=network-online.target\n\n[Service]\nExecStartPre=mkdir -p /var/lib/heat-container-agent\nExecStartPre=mkdir -p /var/run/heat-config\nExecStartPre=mkdir -p /var/run/os-collect-config\nExecStartPre=mkdir -p /opt/stack/os-config-refresh\nExecStartPre=-mv /var/lib/os-collect-config/local-data /var/lib/cloud/data/cfn-init-data\nExecStartPre=mkdir -p /srv/magnum\nExecStartPre=-/bin/podman kill heat-container-agent\nExecStartPre=-/bin/podman rm heat-container-agent\nExecStartPre=-/bin/podman pull docker.io/openstackmagnum/heat-container-agent:train-dev\nExecStart=/bin/podman run \\\n --name heat-container-agent \\\n --privileged \\\n --volume /srv/magnum:/srv/magnum \\\n --volume /opt/stack/os-config-refresh:/opt/stack/os-config-refresh \\\n --volume /run/systemd:/run/systemd \\\n --volume /etc/:/etc/ \\\n --volume /var/lib:/var/lib \\\n --volume /var/run:/var/run \\\n --volume /var/log:/var/log \\\n --volume /tmp:/tmp \\\n --volume /dev:/dev \\\n --env REQUESTS_CA_BUNDLE=/etc/pki/ca-trust/source/anchors/openstack-ca.pem --net=host \\\n docker.io/openstackmagnum/heat-container-agent:train-dev \\\n /usr/bin/start-heat-container-agent\nTimeoutStartSec=10min\n\nExecStop=/bin/podman stop heat-container-agent\n\n[Install]\nWantedBy=multi-user.target\n"
}
]
}
}
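One detail worth calling out in the Ignition document above: the "mode"
fields are plain JSON integers, so they must carry the decimal value of the
intended octal permissions (420 for 0644, 448 for 0700, 493 for 0755), as the
/etc/hostname entry already does. A quick shell check of the conversion:

    # printf treats a leading 0 as octal, so this prints the decimal values
    # Ignition expects for a 0644 file and a 0755 directory.
    printf '%d %d\n' 0644 0755    # -> 420 493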

View File

@ -0,0 +1,17 @@
# Copyright 2016 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version = '1.0.0'
driver = 'k8s_fedora_coreos_v1'
container_version = '1.12.6'

View File

@ -0,0 +1,12 @@
---
features:
- |
    Add fedora coreos driver. To deploy clusters with fedora coreos,
    operators or users need to add os_distro=fedora-coreos to the image.
    The scripts that deploy kubernetes on top are the same as for fedora
    atomic. Note that this driver has selinux enabled.
issues:
- |
    The startup of the heat-container-agent uses a workaround to copy the
    SoftwareDeployment credentials to /var/lib/cloud/data/cfn-init-data.
    The fedora coreos driver requires heat train for its ignition support.
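As the note above says, the glance image has to carry the os_distro property
for Magnum to pick this driver. A minimal sketch with the OpenStack CLI (the
image name and the local qcow2 file name are placeholders):

    # Upload a Fedora CoreOS 30 image and tag it for the fedora-coreos driver.
    openstack image create \
      --disk-format=qcow2 \
      --container-format=bare \
      --property os_distro='fedora-coreos' \
      --file=fedora-coreos-30.qcow2 \
      Fedora-CoreOS-30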

View File

@ -61,6 +61,7 @@ oslo.policy.policies =
magnum.drivers =
k8s_fedora_atomic_v1 = magnum.drivers.k8s_fedora_atomic_v1.driver:Driver
k8s_fedora_coreos_v1 = magnum.drivers.k8s_fedora_coreos_v1.driver:Driver
k8s_coreos_v1 = magnum.drivers.k8s_coreos_v1.driver:Driver
swarm_fedora_atomic_v1 = magnum.drivers.swarm_fedora_atomic_v1.driver:Driver
swarm_fedora_atomic_v2 = magnum.drivers.swarm_fedora_atomic_v2.driver:Driver