diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh index 0093bdfa23..9f2bc8e9cc 100644 --- a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh +++ b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh @@ -42,6 +42,9 @@ EOF } systemctl restart NetworkManager fi +elif [ "$NETWORK_DRIVER" = "flannel" ]; then + $ssh_cmd modprobe vxlan + echo "vxlan" > /etc/modules-load.d/vxlan.conf fi @@ -182,6 +185,7 @@ ExecStart=/bin/bash -c '/usr/bin/podman run --name kubelet \\ --volume /etc/ssl/certs:/etc/ssl/certs:ro \\ --volume /lib/modules:/lib/modules:ro \\ --volume /run:/run \\ + --volume /dev:/dev \\ --volume /sys/fs/cgroup:/sys/fs/cgroup:ro \\ --volume /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \\ --volume /etc/pki/tls/certs:/usr/share/ca-certificates:ro \\ diff --git a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh index 06b81114c2..9ee0414dfb 100644 --- a/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh +++ b/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh @@ -44,6 +44,9 @@ EOF } $ssh_cmd systemctl restart NetworkManager fi +elif [ "$NETWORK_DRIVER" = "flannel" ]; then + $ssh_cmd modprobe vxlan + echo "vxlan" > /etc/modules-load.d/vxlan.conf fi mkdir -p /srv/magnum/kubernetes/ @@ -81,6 +84,7 @@ ExecStart=/bin/bash -c '/usr/bin/podman run --name kubelet \\ --volume /etc/ssl/certs:/etc/ssl/certs:ro \\ --volume /lib/modules:/lib/modules:ro \\ --volume /run:/run \\ + --volume /dev:/dev \\ --volume /sys/fs/cgroup:/sys/fs/cgroup:ro \\ --volume /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \\ --volume /etc/pki/tls/certs:/usr/share/ca-certificates:ro \\ diff --git 
a/magnum/drivers/common/templates/kubernetes/fragments/flannel-service.sh b/magnum/drivers/common/templates/kubernetes/fragments/flannel-service.sh index 02b2383797..48d70c08a3 100644 --- a/magnum/drivers/common/templates/kubernetes/fragments/flannel-service.sh +++ b/magnum/drivers/common/templates/kubernetes/fragments/flannel-service.sh @@ -175,6 +175,11 @@ spec: tier: node app: flannel spec: + # https://pagure.io/atomic/kubernetes-sig/issue/3 + # https://danwalsh.livejournal.com/74754.html + securityContext: + seLinuxOptions: + type: "spc_t" hostNetwork: true nodeSelector: beta.kubernetes.io/arch: amd64 diff --git a/magnum/drivers/heat/driver.py b/magnum/drivers/heat/driver.py index 33a243af01..b55b3cda88 100755 --- a/magnum/drivers/heat/driver.py +++ b/magnum/drivers/heat/driver.py @@ -13,6 +13,7 @@ import abc import collections import os +from pbr.version import SemanticVersion as SV import six from string import ascii_letters @@ -320,6 +321,86 @@ class KubernetesDriver(HeatDriver): raise NotImplementedError("Must implement 'upgrade_cluster'") +class FedoraKubernetesDriver(KubernetesDriver): + """Base driver for Kubernetes clusters.""" + + def upgrade_cluster(self, context, cluster, cluster_template, + max_batch_size, nodegroup, scale_manager=None, + rollback=False): + osc = clients.OpenStackClients(context) + _, heat_params, _ = ( + self._extract_template_definition(context, cluster, + scale_manager=scale_manager)) + # Extract labels/tags from cluster not template + # There are some version tags are not decalared in labels explicitly, + # so we need to get them from heat_params based on the labels given in + # new cluster template. + current_addons = {} + new_addons = {} + for label in cluster_template.labels: + # This is upgrade API, so we don't introduce new stuff by this API, + # but just focus on the version change. 
+ new_addons[label] = cluster_template.labels[label] + if ((label.endswith('_tag') or + label.endswith('_version')) and label in heat_params): + current_addons[label] = heat_params[label] + try: + if (SV.from_pip_string(new_addons[label]) < + SV.from_pip_string(current_addons[label])): + raise exception.InvalidVersion(tag=label) + except Exception as e: + # NOTE(flwang): Different cloud providers may use different + # tag/version format which maybe not able to parse by + # SemanticVersion. For this case, let's just skip it. + LOG.debug("Failed to parse tag/version %s", str(e)) + + heat_params["master_image"] = cluster_template.image_id + heat_params["minion_image"] = cluster_template.image_id + # NOTE(flwang): Overwrite the kube_tag as well to avoid a server + # rebuild then do the k8s upgrade again, when both image id and + # kube_tag changed + heat_params["kube_tag"] = cluster_template.labels["kube_tag"] + heat_params["kube_version"] = cluster_template.labels["kube_tag"] + heat_params["master_kube_tag"] = cluster_template.labels["kube_tag"] + heat_params["minion_kube_tag"] = cluster_template.labels["kube_tag"] + heat_params["update_max_batch_size"] = max_batch_size + # Rules: 1. No downgrade 2. Explicitly override 3. 
Merging based on set + # Update heat_params based on the data generated above + del heat_params['kube_service_account_private_key'] + del heat_params['kube_service_account_key'] + + for label in new_addons: + heat_params[label] = cluster_template.labels[label] + + cluster['cluster_template_id'] = cluster_template.uuid + new_labels = cluster.labels.copy() + new_labels.update(cluster_template.labels) + cluster['labels'] = new_labels + + fields = { + 'existing': True, + 'parameters': heat_params, + 'disable_rollback': not rollback + } + osc.heat().stacks.update(cluster.stack_id, **fields) + + def get_nodegroup_extra_params(self, cluster, osc): + network = osc.heat().resources.get(cluster.stack_id, 'network') + secgroup = osc.heat().resources.get(cluster.stack_id, + 'secgroup_kube_minion') + for output in osc.heat().stacks.get(cluster.stack_id).outputs: + if output['output_key'] == 'api_address': + api_address = output['output_value'] + break + extra_params = { + 'existing_master_private_ip': api_address, + 'existing_security_group': secgroup.attributes['id'], + 'fixed_network': network.attributes['fixed_network'], + 'fixed_subnet': network.attributes['fixed_subnet'], + } + return extra_params + + class HeatPoller(object): def __init__(self, openstack_client, context, cluster, cluster_driver): diff --git a/magnum/drivers/k8s_fedora_atomic_v1/driver.py b/magnum/drivers/k8s_fedora_atomic_v1/driver.py index c48153c48e..b2c046c998 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/driver.py +++ b/magnum/drivers/k8s_fedora_atomic_v1/driver.py @@ -13,17 +13,14 @@ # under the License. 
from oslo_log import log as logging -from pbr.version import SemanticVersion as SV -from magnum.common import clients -from magnum.common import exception from magnum.drivers.heat import driver from magnum.drivers.k8s_fedora_atomic_v1 import template_def LOG = logging.getLogger(__name__) -class Driver(driver.KubernetesDriver): +class Driver(driver.FedoraKubernetesDriver): @property def provides(self): @@ -35,79 +32,3 @@ class Driver(driver.KubernetesDriver): def get_template_definition(self): return template_def.AtomicK8sTemplateDefinition() - - def upgrade_cluster(self, context, cluster, cluster_template, - max_batch_size, nodegroup, scale_manager=None, - rollback=False): - osc = clients.OpenStackClients(context) - _, heat_params, _ = ( - self._extract_template_definition(context, cluster, - scale_manager=scale_manager)) - # Extract labels/tags from cluster not template - # There are some version tags are not decalared in labels explicitly, - # so we need to get them from heat_params based on the labels given in - # new cluster template. - current_addons = {} - new_addons = {} - for label in cluster_template.labels: - # This is upgrade API, so we don't introduce new stuff by this API, - # but just focus on the version change. - new_addons[label] = cluster_template.labels[label] - if ((label.endswith('_tag') or - label.endswith('_version')) and label in heat_params): - current_addons[label] = heat_params[label] - try: - if (SV.from_pip_string(new_addons[label]) < - SV.from_pip_string(current_addons[label])): - raise exception.InvalidVersion(tag=label) - except Exception as e: - # NOTE(flwang): Different cloud providers may use different - # tag/version format which maybe not able to parse by - # SemanticVersion. For this case, let's just skip it. 
- LOG.debug("Failed to parse tag/version %s", str(e)) - - heat_params["master_image"] = cluster_template.image_id - heat_params["minion_image"] = cluster_template.image_id - # NOTE(flwang): Overwrite the kube_tag as well to avoid a server - # rebuild then do the k8s upgrade again, when both image id and - # kube_tag changed - heat_params["kube_tag"] = cluster_template.labels["kube_tag"] - heat_params["kube_version"] = cluster_template.labels["kube_tag"] - heat_params["master_kube_tag"] = cluster_template.labels["kube_tag"] - heat_params["minion_kube_tag"] = cluster_template.labels["kube_tag"] - heat_params["update_max_batch_size"] = max_batch_size - # Rules: 1. No downgrade 2. Explicitly override 3. Merging based on set - # Update heat_params based on the data generated above - del heat_params['kube_service_account_private_key'] - del heat_params['kube_service_account_key'] - - for label in new_addons: - heat_params[label] = cluster_template.labels[label] - - cluster['cluster_template_id'] = cluster_template.uuid - new_labels = cluster.labels.copy() - new_labels.update(cluster_template.labels) - cluster['labels'] = new_labels - - fields = { - 'existing': True, - 'parameters': heat_params, - 'disable_rollback': not rollback - } - osc.heat().stacks.update(cluster.stack_id, **fields) - - def get_nodegroup_extra_params(self, cluster, osc): - network = osc.heat().resources.get(cluster.stack_id, 'network') - secgroup = osc.heat().resources.get(cluster.stack_id, - 'secgroup_kube_minion') - for output in osc.heat().stacks.get(cluster.stack_id).outputs: - if output['output_key'] == 'api_address': - api_address = output['output_value'] - break - extra_params = { - 'existing_master_private_ip': api_address, - 'existing_security_group': secgroup.attributes['id'], - 'fixed_network': network.attributes['fixed_network'], - 'fixed_subnet': network.attributes['fixed_subnet'], - } - return extra_params diff --git a/magnum/drivers/k8s_fedora_coreos_v1/__init__.py 
b/magnum/drivers/k8s_fedora_coreos_v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/magnum/drivers/k8s_fedora_coreos_v1/driver.py b/magnum/drivers/k8s_fedora_coreos_v1/driver.py new file mode 100644 index 0000000000..e8685560f8 --- /dev/null +++ b/magnum/drivers/k8s_fedora_coreos_v1/driver.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from magnum.drivers.heat import driver +from magnum.drivers.k8s_fedora_coreos_v1 import template_def + +LOG = logging.getLogger(__name__) + + +class Driver(driver.FedoraKubernetesDriver): + + @property + def provides(self): + return [ + {'server_type': 'vm', + 'os': 'fedora-coreos', + 'coe': 'kubernetes'}, + ] + + def get_template_definition(self): + return template_def.FCOSK8sTemplateDefinition() diff --git a/magnum/drivers/k8s_fedora_coreos_v1/template_def.py b/magnum/drivers/k8s_fedora_coreos_v1/template_def.py new file mode 100644 index 0000000000..f12114a193 --- /dev/null +++ b/magnum/drivers/k8s_fedora_coreos_v1/template_def.py @@ -0,0 +1,45 @@ +# Copyright 2016 Rackspace Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import six.moves.urllib.parse as urlparse + +from magnum.common import utils +import magnum.conf +from magnum.drivers.heat import k8s_fedora_template_def as kftd + +CONF = magnum.conf.CONF + + +class FCOSK8sTemplateDefinition(kftd.K8sFedoraTemplateDefinition): + """Kubernetes template for a Fedora Atomic VM.""" + + @property + def driver_module_path(self): + return __name__[:__name__.rindex('.')] + + @property + def template_path(self): + return os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'templates/kubecluster.yaml') + + def get_params(self, context, cluster_template, cluster, **kwargs): + extra_params = super(FCOSK8sTemplateDefinition, + self).get_params(context, + cluster_template, + cluster, + **kwargs) + extra_params['openstack_ca'] = urlparse.quote( + utils.get_openstack_ca()) + return extra_params diff --git a/magnum/drivers/k8s_fedora_coreos_v1/templates/COPYING b/magnum/drivers/k8s_fedora_coreos_v1/templates/COPYING new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/magnum/drivers/k8s_fedora_coreos_v1/templates/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/magnum/drivers/k8s_fedora_coreos_v1/templates/kubecluster.yaml b/magnum/drivers/k8s_fedora_coreos_v1/templates/kubecluster.yaml new file mode 100644 index 0000000000..e3d7209e1b --- /dev/null +++ b/magnum/drivers/k8s_fedora_coreos_v1/templates/kubecluster.yaml @@ -0,0 +1,1261 @@ +heat_template_version: queens + +description: > + This template will boot a Kubernetes cluster with one or more + minions (as specified by the number_of_minions parameter, which + defaults to 1). 
+ +conditions: + master_only: + or: + - equals: + - get_param: role + - "master" + - equals: + - get_param: is_cluster_stack + - true + + worker_only: + or: + - equals: + - get_param: role + - "worker" + - equals: + - get_param: is_cluster_stack + - true + + create_cluster_resources: + equals: + - get_param: is_cluster_stack + - true + +parameters: + + # needs to become a list if we want to join master nodes? + existing_master_private_ip: + type: string + default: "" + + is_cluster_stack: + type: boolean + default: false + + role: + type: string + default: "" + + existing_security_group: + type: string + default: "" + + ssh_key_name: + type: string + description: name of ssh key to be provisioned on our server + default: "" + + ssh_public_key: + type: string + description: The public ssh key to add in all nodes + default: "" + + external_network: + type: string + description: uuid of a network to use for floating ip addresses + + fixed_network: + type: string + description: uuid/name of an existing network to use to provision machines + default: "" + + fixed_network_name: + type: string + description: name of a private network to use to provision machines + default: "private" + + fixed_subnet: + type: string + description: uuid/name of an existing subnet to use to provision machines + default: "" + + master_image: + type: string + description: glance image used to boot the server + # When creating a new minion nodegroup this will not + # be provided by magnum. So make it default to "" + default: "" + + minion_image: + type: string + description: glance image used to boot the server + # When creating a new master nodegroup this will not + # be provided by magnum. 
So make it default to "" + default: "" + + master_flavor: + type: string + default: m1.small + description: flavor to use when booting the server for master nodes + + minion_flavor: + type: string + default: m1.small + description: flavor to use when booting the server for minions + + prometheus_monitoring: + type: boolean + default: false + description: > + whether or not to have the grafana-prometheus-cadvisor monitoring setup + + grafana_admin_passwd: + type: string + default: admin + hidden: true + description: > + admin user password for the Grafana monitoring interface + + dns_nameserver: + type: comma_delimited_list + description: address of a DNS nameserver reachable in your environment + default: 8.8.8.8 + + number_of_masters: + type: number + description: how many kubernetes masters to spawn + default: 1 + + number_of_minions: + type: number + description: how many kubernetes minions to spawn + default: 1 + + fixed_network_cidr: + type: string + description: network range for fixed ip network + default: 10.0.0.0/24 + + portal_network_cidr: + type: string + description: > + address range used by kubernetes for service portals + default: 10.254.0.0/16 + + network_driver: + type: string + description: network driver to use for instantiating container networks + default: flannel + + flannel_network_cidr: + type: string + description: network range for flannel overlay network + default: 10.100.0.0/16 + + flannel_network_subnetlen: + type: number + description: size of subnet assigned to each minion + default: 24 + + flannel_backend: + type: string + description: > + specify the backend for flannel, default vxlan backend + default: "vxlan" + constraints: + - allowed_values: ["udp", "vxlan", "host-gw"] + + system_pods_initial_delay: + type: number + description: > + health check, time to wait for system pods (podmaster, scheduler) to boot + (in seconds) + default: 30 + + system_pods_timeout: + type: number + description: > + health check, timeout for system pods 
(podmaster, scheduler) to answer. + (in seconds) + default: 5 + + admission_control_list: + type: string + description: > + List of admission control plugins to activate + default: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + + kube_allow_priv: + type: string + description: > + whether or not kubernetes should permit privileged containers. + default: "true" + constraints: + - allowed_values: ["true", "false"] + + boot_volume_size: + type: number + description: > + size of the cinder boot volume for nodes root volume + + boot_volume_type: + type: string + description: > + type of the cinder boot volume for nodes root volume + + etcd_volume_size: + type: number + description: > + size of the cinder volume for etcd storage + default: 0 + + etcd_volume_type: + type: string + description: > + type of a cinder volume for etcd storage + + docker_volume_size: + type: number + description: > + size of a cinder volume to allocate to docker for container/image + storage + default: 0 + + docker_volume_type: + type: string + description: > + type of a cinder volume to allocate to docker for container/image + storage + + docker_storage_driver: + type: string + description: docker storage driver name + default: "devicemapper" + + cgroup_driver: + type: string + description: > + cgroup driver name that kubelet should use, ideally the same as + the docker cgroup driver. + default: "cgroupfs" + + traefik_ingress_controller_tag: + type: string + description: tag of the traefik containers to be used. + default: v1.7.10 + + wait_condition_timeout: + type: number + description: > + timeout for the Wait Conditions + default: 6000 + + minions_to_remove: + type: comma_delimited_list + description: > + List of minions to be removed when doing an update. Individual minion may + be referenced several ways: (1) The resource name (e.g. 
['1', '3']), + (2) The private IP address ['10.0.0.4', '10.0.0.6']. Note: the list should + be empty when doing an create. + default: [] + + discovery_url: + type: string + description: > + Discovery URL used for bootstrapping the etcd cluster. + + registry_enabled: + type: boolean + description: > + Indicates whether the docker registry is enabled. + default: false + + registry_port: + type: number + description: port of registry service + default: 5000 + + swift_region: + type: string + description: region of swift service + default: "" + + registry_container: + type: string + description: > + name of swift container which docker registry stores images in + default: "container" + + registry_insecure: + type: boolean + description: > + indicates whether to skip TLS verification between registry and backend storage + default: true + + registry_chunksize: + type: number + description: > + size fo the data segments for the swift dynamic large objects + default: 5242880 + + volume_driver: + type: string + description: volume driver to use for container storage + default: "" + + region_name: + type: string + description: A logically separate section of the cluster + + username: + type: string + description: > + user account + + password: + type: string + description: > + user password, not set in current implementation, only used to + fill in for Kubernetes config file + default: + ChangeMe + hidden: true + + loadbalancing_protocol: + type: string + description: > + The protocol which is used for load balancing. If you want to change + tls_disabled option to 'True', please change this to "HTTP". 
+ default: TCP + constraints: + - allowed_values: ["TCP", "HTTP"] + + tls_disabled: + type: boolean + description: whether or not to disable TLS + default: False + + kube_dashboard_enabled: + type: boolean + description: whether or not to enable kubernetes dashboard + default: True + + influx_grafana_dashboard_enabled: + type: boolean + description: Enable influxdb with grafana dashboard for data from heapster + default: False + + verify_ca: + type: boolean + description: whether or not to validate certificate authority + + kubernetes_port: + type: number + description: > + The port which are used by kube-apiserver to provide Kubernetes + service. + default: 6443 + + cluster_uuid: + type: string + description: identifier for the cluster this template is generating + + magnum_url: + type: string + description: endpoint to retrieve TLS certs from + + http_proxy: + type: string + description: http proxy address for docker + default: "" + + https_proxy: + type: string + description: https proxy address for docker + default: "" + + no_proxy: + type: string + description: no proxies for docker + default: "" + + trustee_domain_id: + type: string + description: domain id of the trustee + + trustee_user_id: + type: string + description: user id of the trustee + + trustee_username: + type: string + description: username of the trustee + + trustee_password: + type: string + description: password of the trustee + hidden: true + + trust_id: + type: string + description: id of the trust which is used by the trustee + hidden: true + + auth_url: + type: string + description: url for keystone + + kube_tag: + type: string + description: tag of the k8s containers used to provision the kubernetes cluster + default: v1.14.3 + + master_kube_tag: + type: string + description: tag of the k8s containers used to provision the kubernetes cluster + default: v1.14.3 + + minion_kube_tag: + type: string + description: tag of the k8s containers used to provision the kubernetes cluster + default: 
v1.14.3 + + # FIXME update cloud_provider_tag when a fix for PVC is released + # https://github.com/kubernetes/cloud-provider-openstack/pull/405 + cloud_provider_tag: + type: string + description: + tag of the kubernetes/cloud-provider-openstack + https://hub.docker.com/r/k8scloudprovider/openstack-cloud-controller-manager/tags/ + default: v1.14.0 + + cloud_provider_enabled: + type: boolean + description: Enable or disable the openstack kubernetes cloud provider + + etcd_tag: + type: string + description: tag of the etcd system container + default: 3.2.26 + + coredns_tag: + type: string + description: tag for coredns + default: 1.3.1 + + flannel_tag: + type: string + description: tag of the flannel container + default: v0.11.0-amd64 + + flannel_cni_tag: + type: string + description: tag of the flannel cni container + default: v0.3.0 + + kube_version: + type: string + description: version of kubernetes used for kubernetes cluster + default: v1.14.3 + + kube_dashboard_version: + type: string + description: version of kubernetes dashboard used for kubernetes cluster + default: v1.8.3 + + insecure_registry_url: + type: string + description: insecure registry url + default: "" + + container_infra_prefix: + type: string + description: > + prefix of container images used in the cluster, kubernetes components, + kubernetes-dashboard, coredns etc + constraints: + - allowed_pattern: "^$|.*/" + default: "" + + dns_service_ip: + type: string + description: > + address used by Kubernetes DNS service + default: 10.254.0.10 + + dns_cluster_domain: + type: string + description: > + domain name for cluster DNS + default: "cluster.local" + + openstack_ca: + type: string + hidden: true + description: The OpenStack CA certificate to install on the node. 
+ + nodes_affinity_policy: + type: string + description: > + affinity policy for nodes server group + constraints: + - allowed_values: ["affinity", "anti-affinity", "soft-affinity", + "soft-anti-affinity"] + + availability_zone: + type: string + description: > + availability zone for master and nodes + default: "" + + cert_manager_api: + type: boolean + description: true if the kubernetes cert api manager should be enabled + default: false + + ca_key: + type: string + description: key of internal ca for the kube certificate api manager + default: "" + hidden: true + + calico_tag: + type: string + description: tag of the calico containers used to provision the calico node + default: v3.3.6 + + calico_kube_controllers_tag: + type: string + description: tag of the kube_controllers used to provision the calico node + default: v1.0.3 + + calico_ipv4pool: + type: string + description: Configure the IP pool from which Pod IPs will be chosen + default: "192.168.0.0/16" + + pods_network_cidr: + type: string + description: Configure the IP pool/range from which pod IPs will be chosen + + ingress_controller: + type: string + description: > + ingress controller backend to use + default: "" + + ingress_controller_role: + type: string + description: > + node role where the ingress controller backend should run + default: "ingress" + + octavia_ingress_controller_tag: + type: string + description: Octavia ingress controller docker image tag. 
+ default: "1.13.2-alpha" + + kubelet_options: + type: string + description: > + additional options to be passed to the kubelet + default: "" + + kubeapi_options: + type: string + description: > + additional options to be passed to the api + default: "" + + kubecontroller_options: + type: string + description: > + additional options to be passed to the controller manager + default: "" + + kubeproxy_options: + type: string + description: > + additional options to be passed to the kube proxy + default: "" + + kubescheduler_options: + type: string + description: > + additional options to be passed to the scheduler + default: "" + + octavia_enabled: + type: boolean + description: > + whether or not to use Octavia for LoadBalancer type service. + default: False + + kube_service_account_key: + type: string + hidden: true + description: > + The signed cert will be used to verify the k8s service account tokens + during authentication. + + kube_service_account_private_key: + type: string + hidden: true + description: > + The private key will be used to sign generated k8s service account + tokens. + + prometheus_tag: + type: string + description: tag of the prometheus container + default: v1.8.2 + + grafana_tag: + type: string + description: tag of grafana container + default: 5.1.5 + + heat_container_agent_tag: + type: string + description: tag of the heat_container_agent system container + default: train-dev + + keystone_auth_enabled: + type: boolean + description: > + true if the keystone authN and authZ should be enabled + default: + true + + keystone_auth_default_policy: + type: string + description: Json read from /etc/magnum/keystone_auth_default_policy.json + default: "" + + k8s_keystone_auth_tag: + type: string + description: tag of the k8s_keystone_auth container + default: v1.14.0 + + monitoring_enabled: + type: boolean + description: Enable or disable prometheus-operator monitoring solution. 
+ default: false + + prometheus_operator_chart_tag: + type: string + description: The stable/prometheus-operator chart version to use. + default: 5.12.3 + + project_id: + type: string + description: > + project id of current project + + tiller_enabled: + type: boolean + description: Choose whether to install tiller or not. + default: false + + tiller_tag: + type: string + description: tag of tiller container + default: "v2.12.3" + + tiller_namespace: + type: string + description: namespace where tiller will be installed. + default: "magnum-tiller" + + auto_healing_enabled: + type: boolean + description: > + true if the auto healing feature should be enabled + default: + false + + auto_healing_controller: + type: string + description: > + The service to be deployed for auto-healing. + default: "draino" + + magnum_auto_healer_tag: + type: string + description: tag of the magnum-auto-healer service. + default: "v1.15.0" + + auto_scaling_enabled: + type: boolean + description: > + true if the auto scaling feature should be enabled + default: + false + + node_problem_detector_tag: + type: string + description: tag of the node problem detector container + default: v0.6.2 + + nginx_ingress_controller_tag: + type: string + description: nginx ingress controller docker image tag + default: 0.23.0 + + draino_tag: + type: string + description: tag of the draino container + default: abf028a + + autoscaler_tag: + type: string + description: tag of the autoscaler container + default: v1.0 + + min_node_count: + type: number + description: > + minimum node count of cluster workers when doing scale down + default: 1 + + max_node_count: + type: number + description: > + maximum node count of cluster workers when doing scale up + + update_max_batch_size: + type: number + description: > + max batch size when doing rolling upgrade + default: 1 + + npd_enabled: + type: boolean + description: > + true if the npd service should be launched + default: + true + +resources: + + 
###################################################################### + # + # network resources. allocate a network and router for our server. + # Important: the Load Balancer feature in Kubernetes requires that + # the name for the fixed_network must be "private" for the + # address lookup in Kubernetes to work properly + # + + network: + condition: create_cluster_resources + type: ../../common/templates/network.yaml + properties: + existing_network: {get_param: fixed_network} + existing_subnet: {get_param: fixed_subnet} + private_network_cidr: {get_param: fixed_network_cidr} + dns_nameserver: {get_param: dns_nameserver} + external_network: {get_param: external_network} + private_network_name: {get_param: fixed_network_name} + + api_lb: + condition: create_cluster_resources + type: ../../common/templates/lb_api.yaml + properties: + fixed_subnet: {get_attr: [network, fixed_subnet]} + external_network: {get_param: external_network} + protocol: {get_param: loadbalancing_protocol} + port: {get_param: kubernetes_port} + + etcd_lb: + condition: create_cluster_resources + type: ../../common/templates/lb_etcd.yaml + properties: + fixed_subnet: {get_attr: [network, fixed_subnet]} + protocol: {get_param: loadbalancing_protocol} + port: 2379 + + ###################################################################### + # + # security groups. we need to permit network traffic of various + # sorts. 
+ # + + secgroup_kube_master: + condition: create_cluster_resources + type: OS::Neutron::SecurityGroup + properties: + rules: + - protocol: icmp + - protocol: tcp + port_range_min: 22 + port_range_max: 22 + - protocol: tcp + port_range_min: 7080 + port_range_max: 7080 + - protocol: tcp + port_range_min: 8080 + port_range_max: 8080 + - protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + - protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + - protocol: tcp + port_range_min: 6443 + port_range_max: 6443 + - protocol: tcp + port_range_min: 9100 + port_range_max: 9100 + - protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + - protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + - protocol: udp + port_range_min: 8472 + port_range_max: 8472 + + secgroup_kube_minion: + condition: create_cluster_resources + type: OS::Neutron::SecurityGroup + properties: + rules: + - protocol: icmp + # Default port range for external service ports. + # In future, if the option `manage-security-groups` for ccm works + # well, we could remove this rule here. 
+ # The PR in ccm is + # https://github.com/kubernetes/cloud-provider-openstack/pull/491 + - protocol: tcp + port_range_min: 22 + port_range_max: 22 + - protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + # allow any traffic from master nodes + - protocol: tcp + port_range_min: 1 + port_range_max: 65535 + remote_mode: 'remote_group_id' + remote_group_id: {get_resource: secgroup_kube_master} + - protocol: udp + port_range_min: 1 + port_range_max: 65535 + remote_mode: 'remote_group_id' + remote_group_id: {get_resource: secgroup_kube_master} + + # allow any traffic between worker nodes + secgroup_rule_tcp_kube_minion: + condition: create_cluster_resources + type: OS::Neutron::SecurityGroupRule + properties: + protocol: tcp + port_range_min: 1 + port_range_max: 65535 + security_group: {get_resource: secgroup_kube_minion} + remote_group: {get_resource: secgroup_kube_minion} + secgroup_rule_udp_kube_minion: + condition: create_cluster_resources + type: OS::Neutron::SecurityGroupRule + properties: + protocol: udp + port_range_min: 1 + port_range_max: 65535 + security_group: {get_resource: secgroup_kube_minion} + remote_group: {get_resource: secgroup_kube_minion} + + ###################################################################### + # + # resources that expose the IPs of either the kube master or a given + # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
+ # + + api_address_lb_switch: + condition: create_cluster_resources + type: Magnum::ApiGatewaySwitcher + properties: + pool_public_ip: {get_attr: [api_lb, floating_address]} + pool_private_ip: {get_attr: [api_lb, address]} + master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} + master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} + + etcd_address_lb_switch: + condition: create_cluster_resources + type: Magnum::ApiGatewaySwitcher + properties: + pool_private_ip: {get_attr: [etcd_lb, address]} + master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} + + ###################################################################### + # + # resources that expose the IPs of either floating ip or a given + # fixed ip depending on whether FloatingIP is enabled for the cluster. + # + + api_address_floating_switch: + condition: create_cluster_resources + type: Magnum::FloatingIPAddressSwitcher + properties: + public_ip: {get_attr: [api_address_lb_switch, public_ip]} + private_ip: {get_attr: [api_address_lb_switch, private_ip]} + + ###################################################################### + # + # resources that expose one server group for each master and worker nodes + # separately. + # + + master_nodes_server_group: + condition: master_only + type: OS::Nova::ServerGroup + properties: + policies: [{get_param: nodes_affinity_policy}] + + worker_nodes_server_group: + condition: worker_only + type: OS::Nova::ServerGroup + properties: + policies: [{get_param: nodes_affinity_policy}] + + ###################################################################### + # + # kubernetes masters. This is a resource group that will create + # masters. 
+ # + + kube_masters: + condition: master_only + type: OS::Heat::ResourceGroup + depends_on: + - network + update_policy: + rolling_update: {max_batch_size: {get_param: update_max_batch_size}, pause_time: 30} + properties: + count: {get_param: number_of_masters} + resource_def: + type: kubemaster.yaml + properties: + name: + list_join: + - '-' + - [{ get_param: 'OS::stack_name' }, 'master', '%index%'] + prometheus_monitoring: {get_param: prometheus_monitoring} + grafana_admin_passwd: {get_param: grafana_admin_passwd} + api_public_address: {get_attr: [api_lb, floating_address]} + api_private_address: {get_attr: [api_lb, address]} + ssh_key_name: {get_param: ssh_key_name} + ssh_public_key: {get_param: ssh_public_key} + server_image: {get_param: master_image} + master_flavor: {get_param: master_flavor} + external_network: {get_param: external_network} + kube_allow_priv: {get_param: kube_allow_priv} + boot_volume_size: {get_param: boot_volume_size} + boot_volume_type: {get_param: boot_volume_type} + etcd_volume_size: {get_param: etcd_volume_size} + etcd_volume_type: {get_param: etcd_volume_type} + docker_volume_size: {get_param: docker_volume_size} + docker_volume_type: {get_param: docker_volume_type} + docker_storage_driver: {get_param: docker_storage_driver} + cgroup_driver: {get_param: cgroup_driver} + network_driver: {get_param: network_driver} + flannel_network_cidr: {get_param: flannel_network_cidr} + flannel_network_subnetlen: {get_param: flannel_network_subnetlen} + flannel_backend: {get_param: flannel_backend} + system_pods_initial_delay: {get_param: system_pods_initial_delay} + system_pods_timeout: {get_param: system_pods_timeout} + portal_network_cidr: {get_param: portal_network_cidr} + admission_control_list: {get_param: admission_control_list} + discovery_url: {get_param: discovery_url} + cluster_uuid: {get_param: cluster_uuid} + magnum_url: {get_param: magnum_url} + traefik_ingress_controller_tag: {get_param: traefik_ingress_controller_tag} + 
volume_driver: {get_param: volume_driver} + region_name: {get_param: region_name} + fixed_network: {get_attr: [network, fixed_network]} + fixed_network_name: {get_param: fixed_network_name} + fixed_subnet: {get_attr: [network, fixed_subnet]} + api_pool_id: {get_attr: [api_lb, pool_id]} + etcd_pool_id: {get_attr: [etcd_lb, pool_id]} + username: {get_param: username} + password: {get_param: password} + kubernetes_port: {get_param: kubernetes_port} + tls_disabled: {get_param: tls_disabled} + kube_dashboard_enabled: {get_param: kube_dashboard_enabled} + influx_grafana_dashboard_enabled: {get_param: influx_grafana_dashboard_enabled} + verify_ca: {get_param: verify_ca} + secgroup_kube_master_id: {get_resource: secgroup_kube_master} + http_proxy: {get_param: http_proxy} + https_proxy: {get_param: https_proxy} + no_proxy: {get_param: no_proxy} + kube_tag: {get_param: master_kube_tag} + cloud_provider_tag: {get_param: cloud_provider_tag} + cloud_provider_enabled: {get_param: cloud_provider_enabled} + kube_version: {get_param: kube_version} + etcd_tag: {get_param: etcd_tag} + coredns_tag: {get_param: coredns_tag} + flannel_tag: {get_param: flannel_tag} + flannel_cni_tag: {get_param: flannel_cni_tag} + kube_dashboard_version: {get_param: kube_dashboard_version} + trustee_user_id: {get_param: trustee_user_id} + trustee_password: {get_param: trustee_password} + trust_id: {get_param: trust_id} + auth_url: {get_param: auth_url} + insecure_registry_url: {get_param: insecure_registry_url} + container_infra_prefix: {get_param: container_infra_prefix} + etcd_lb_vip: {get_attr: [etcd_lb, address]} + dns_service_ip: {get_param: dns_service_ip} + dns_cluster_domain: {get_param: dns_cluster_domain} + openstack_ca: {get_param: openstack_ca} + nodes_server_group_id: {get_resource: master_nodes_server_group} + availability_zone: {get_param: availability_zone} + ca_key: {get_param: ca_key} + cert_manager_api: {get_param: cert_manager_api} + calico_tag: {get_param: calico_tag} + 
calico_kube_controllers_tag: {get_param: calico_kube_controllers_tag} + calico_ipv4pool: {get_param: calico_ipv4pool} + pods_network_cidr: {get_param: pods_network_cidr} + ingress_controller: {get_param: ingress_controller} + ingress_controller_role: {get_param: ingress_controller_role} + octavia_ingress_controller_tag: {get_param: octavia_ingress_controller_tag} + kubelet_options: {get_param: kubelet_options} + kubeapi_options: {get_param: kubeapi_options} + kubeproxy_options: {get_param: kubeproxy_options} + kubecontroller_options: {get_param: kubecontroller_options} + kubescheduler_options: {get_param: kubescheduler_options} + octavia_enabled: {get_param: octavia_enabled} + kube_service_account_key: {get_param: kube_service_account_key} + kube_service_account_private_key: {get_param: kube_service_account_private_key} + prometheus_tag: {get_param: prometheus_tag} + grafana_tag: {get_param: grafana_tag} + heat_container_agent_tag: {get_param: heat_container_agent_tag} + keystone_auth_enabled: {get_param: keystone_auth_enabled} + k8s_keystone_auth_tag: {get_param: k8s_keystone_auth_tag} + monitoring_enabled: {get_param: monitoring_enabled} + prometheus_operator_chart_tag: {get_param: prometheus_operator_chart_tag} + project_id: {get_param: project_id} + tiller_enabled: {get_param: tiller_enabled} + tiller_tag: {get_param: tiller_tag} + tiller_namespace: {get_param: tiller_namespace} + node_problem_detector_tag: {get_param: node_problem_detector_tag} + nginx_ingress_controller_tag: {get_param: nginx_ingress_controller_tag} + auto_healing_enabled: {get_param: auto_healing_enabled} + auto_healing_controller: {get_param: auto_healing_controller} + magnum_auto_healer_tag: {get_param: magnum_auto_healer_tag} + auto_scaling_enabled: {get_param: auto_scaling_enabled} + draino_tag: {get_param: draino_tag} + autoscaler_tag: {get_param: autoscaler_tag} + min_node_count: {get_param: min_node_count} + max_node_count: {get_param: max_node_count} + npd_enabled: {get_param: 
npd_enabled} + + kube_cluster_config: + condition: create_cluster_resources + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - "\n" + - + - str_replace: + template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager.sh} + params: + "$CA_KEY": {get_param: ca_key} + - get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh + - get_file: ../../common/templates/kubernetes/fragments/core-dns-service.sh + - get_file: ../../common/templates/kubernetes/fragments/calico-service.sh + - get_file: ../../common/templates/kubernetes/fragments/flannel-service.sh + - get_file: ../../common/templates/kubernetes/fragments/enable-helm-tiller.sh + - str_replace: + template: {get_file: ../../common/templates/kubernetes/fragments/enable-prometheus-monitoring.sh} + params: + "$ADMIN_PASSWD": {get_param: grafana_admin_passwd} + - str_replace: + params: + $enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik.sh} + $enable-ingress-octavia: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-octavia.sh} + template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller.sh} + - get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh + - str_replace: + template: {get_file: ../../common/templates/kubernetes/fragments/enable-keystone-auth.sh} + params: + "$KEYSTONE_AUTH_DEFAULT_POLICY": {get_param: keystone_auth_default_policy} + - get_file: ../../common/templates/kubernetes/fragments/enable-auto-healing.sh + - get_file: ../../common/templates/kubernetes/fragments/enable-auto-scaling.sh + # Helm Based Installation Configuration Scripts + - get_file: ../../common/templates/kubernetes/helm/metrics-server.sh + - str_replace: + template: {get_file: ../../common/templates/kubernetes/helm/prometheus-operator.sh} + params: + "${ADMIN_PASSWD}": {get_param: grafana_admin_passwd} + - get_file: 
../../common/templates/kubernetes/helm/ingress-nginx.sh + - get_file: ../../common/templates/kubernetes/fragments/install-helm-modules.sh + + kube_cluster_deploy: + condition: create_cluster_resources + type: OS::Heat::SoftwareDeployment + properties: + actions: ['CREATE'] + signal_transport: HEAT_SIGNAL + config: + get_resource: kube_cluster_config + server: + get_attr: [kube_masters, resource.0] + + + ###################################################################### + # + # kubernetes minions. This is an resource group that will initially + # create minions, and needs to be manually scaled. + # + + kube_minions: + condition: worker_only + type: OS::Heat::ResourceGroup + depends_on: + - network + update_policy: + rolling_update: {max_batch_size: {get_param: update_max_batch_size}, pause_time: 30} + properties: + count: {get_param: number_of_minions} + removal_policies: [{resource_list: {get_param: minions_to_remove}}] + resource_def: + type: kubeminion.yaml + properties: + name: + list_join: + - '-' + - [{ get_param: 'OS::stack_name' }, 'node', '%index%'] + prometheus_monitoring: {get_param: prometheus_monitoring} + ssh_key_name: {get_param: ssh_key_name} + ssh_public_key: {get_param: ssh_public_key} + server_image: {get_param: minion_image} + minion_flavor: {get_param: minion_flavor} + fixed_network: + if: + - create_cluster_resources + - get_attr: [network, fixed_network] + - get_param: fixed_network + fixed_subnet: + if: + - create_cluster_resources + - get_attr: [network, fixed_subnet] + - get_param: fixed_subnet + network_driver: {get_param: network_driver} + flannel_network_cidr: {get_param: flannel_network_cidr} + kube_master_ip: + if: + - create_cluster_resources + - get_attr: [api_address_lb_switch, private_ip] + - get_param: existing_master_private_ip + etcd_server_ip: + if: + - create_cluster_resources + - get_attr: [etcd_address_lb_switch, private_ip] + - get_param: existing_master_private_ip + external_network: {get_param: external_network} + 
kube_allow_priv: {get_param: kube_allow_priv} + boot_volume_size: {get_param: boot_volume_size} + boot_volume_type: {get_param: boot_volume_type} + docker_volume_size: {get_param: docker_volume_size} + docker_volume_type: {get_param: docker_volume_type} + docker_storage_driver: {get_param: docker_storage_driver} + cgroup_driver: {get_param: cgroup_driver} + wait_condition_timeout: {get_param: wait_condition_timeout} + registry_enabled: {get_param: registry_enabled} + registry_port: {get_param: registry_port} + swift_region: {get_param: swift_region} + registry_container: {get_param: registry_container} + registry_insecure: {get_param: registry_insecure} + registry_chunksize: {get_param: registry_chunksize} + cluster_uuid: {get_param: cluster_uuid} + magnum_url: {get_param: magnum_url} + volume_driver: {get_param: volume_driver} + region_name: {get_param: region_name} + auth_url: {get_param: auth_url} + username: {get_param: username} + password: {get_param: password} + kubernetes_port: {get_param: kubernetes_port} + tls_disabled: {get_param: tls_disabled} + verify_ca: {get_param: verify_ca} + secgroup_kube_minion_id: + if: + - create_cluster_resources + - get_resource: secgroup_kube_minion + - get_param: existing_security_group + http_proxy: {get_param: http_proxy} + https_proxy: {get_param: https_proxy} + no_proxy: {get_param: no_proxy} + kube_tag: {get_param: minion_kube_tag} + kube_version: {get_param: kube_version} + trustee_user_id: {get_param: trustee_user_id} + trustee_username: {get_param: trustee_username} + trustee_password: {get_param: trustee_password} + trustee_domain_id: {get_param: trustee_domain_id} + trust_id: {get_param: trust_id} + cloud_provider_enabled: {get_param: cloud_provider_enabled} + insecure_registry_url: {get_param: insecure_registry_url} + container_infra_prefix: {get_param: container_infra_prefix} + dns_service_ip: {get_param: dns_service_ip} + dns_cluster_domain: {get_param: dns_cluster_domain} + openstack_ca: {get_param: 
openstack_ca} + nodes_server_group_id: {get_resource: worker_nodes_server_group} + availability_zone: {get_param: availability_zone} + pods_network_cidr: {get_param: pods_network_cidr} + kubelet_options: {get_param: kubelet_options} + kubeproxy_options: {get_param: kubeproxy_options} + octavia_enabled: {get_param: octavia_enabled} + heat_container_agent_tag: {get_param: heat_container_agent_tag} + auto_healing_enabled: {get_param: auto_healing_enabled} + npd_enabled: {get_param: npd_enabled} + auto_healing_controller: {get_param: auto_healing_controller} + +outputs: + + api_address: + condition: create_cluster_resources + value: + str_replace: + template: api_ip_address + params: + api_ip_address: {get_attr: [api_address_floating_switch, ip_address]} + description: > + This is the API endpoint of the Kubernetes cluster. Use this to access + the Kubernetes API. + + registry_address: + condition: create_cluster_resources + value: + str_replace: + template: localhost:port + params: + port: {get_param: registry_port} + description: + This is the url of docker registry server where you can store docker + images. + + kube_masters_private: + condition: master_only + value: {get_attr: [kube_masters, kube_master_ip]} + description: > + This is a list of the "private" IP addresses of all the Kubernetes masters. + + kube_masters: + condition: master_only + value: {get_attr: [kube_masters, kube_master_external_ip]} + description: > + This is a list of the "public" IP addresses of all the Kubernetes masters. + Use these IP addresses to log in to the Kubernetes masters via ssh. + + kube_minions_private: + condition: worker_only + value: {get_attr: [kube_minions, kube_minion_ip]} + description: > + This is a list of the "private" IP addresses of all the Kubernetes minions. + + kube_minions: + condition: worker_only + value: {get_attr: [kube_minions, kube_minion_external_ip]} + description: > + This is a list of the "public" IP addresses of all the Kubernetes minions. 
+ Use these IP addresses to log in to the Kubernetes minions via ssh. diff --git a/magnum/drivers/k8s_fedora_coreos_v1/templates/kubemaster.yaml b/magnum/drivers/k8s_fedora_coreos_v1/templates/kubemaster.yaml new file mode 100644 index 0000000000..0430e7e9af --- /dev/null +++ b/magnum/drivers/k8s_fedora_coreos_v1/templates/kubemaster.yaml @@ -0,0 +1,853 @@ +heat_template_version: queens + +description: > + This is a nested stack that defines a single Kubernetes master, This stack is + included by an ResourceGroup resource in the parent template + (kubecluster.yaml). + +parameters: + + name: + type: string + description: server name + + server_image: + type: string + description: glance image used to boot the server + + master_flavor: + type: string + description: flavor to use when booting the server + + ssh_key_name: + type: string + description: name of ssh key to be provisioned on our server + + ssh_public_key: + type: string + description: The public ssh key to add in all nodes + + external_network: + type: string + description: uuid of a network to use for floating ip addresses + + portal_network_cidr: + type: string + description: > + address range used by kubernetes for service portals + + kube_allow_priv: + type: string + description: > + whether or not kubernetes should permit privileged containers. 
+ constraints: + - allowed_values: ["true", "false"] + + boot_volume_size: + type: number + description: > + size of the cinder boot volume for nodes root volume + default: 0 + + boot_volume_type: + type: string + description: > + type of the cinder boot volume for nodes root volume + + etcd_volume_size: + type: number + description: > + size of a cinder volume to allocate for etcd storage + + etcd_volume_type: + type: string + description: > + type of a cinder volume to allocate for etcd storage + + docker_volume_size: + type: number + description: > + size of a cinder volume to allocate to docker for container/image + storage + + docker_volume_type: + type: string + description: > + type of a cinder volume to allocate to docker for container/image + storage + + docker_storage_driver: + type: string + description: docker storage driver name + default: "devicemapper" + + cgroup_driver: + type: string + description: > + cgroup driver name that kubelet should use, ideally the same as + the docker cgroup driver. + default: "cgroupfs" + + volume_driver: + type: string + description: volume driver to use for container storage + + region_name: + type: string + description: A logically separate section of the cluster + + flannel_network_cidr: + type: string + description: network range for flannel overlay network + + flannel_network_subnetlen: + type: number + description: size of subnet assigned to each master + + flannel_backend: + type: string + description: > + specify the backend for flannel, default udp backend + constraints: + - allowed_values: ["udp", "vxlan", "host-gw"] + + system_pods_initial_delay: + type: number + description: > + health check, time to wait for system pods (podmaster, scheduler) to boot + (in seconds) + default: 30 + + system_pods_timeout: + type: number + description: > + health check, timeout for system pods (podmaster, scheduler) to answer. 
(in seconds) + default: 5 + + admission_control_list: + type: string + description: > + List of admission control plugins to activate + + discovery_url: + type: string + description: > + Discovery URL used for bootstrapping the etcd cluster. + + tls_disabled: + type: boolean + description: whether or not to disable TLS + + traefik_ingress_controller_tag: + type: string + description: tag of the traefik containers to be used. + + kube_dashboard_enabled: + type: boolean + description: whether or not to enable kubernetes dashboard + + influx_grafana_dashboard_enabled: + type: boolean + description: Enable influxdb with grafana dashboard for data from heapster + + verify_ca: + type: boolean + description: whether or not to validate certificate authority + + kubernetes_port: + type: number + description: > + The port which is used by kube-apiserver to provide Kubernetes + service. + + cluster_uuid: + type: string + description: identifier for the cluster this template is generating + + magnum_url: + type: string + description: endpoint to retrieve TLS certs from + + prometheus_monitoring: + type: boolean + description: > + whether or not to have prometheus and grafana deployed + + grafana_admin_passwd: + type: string + hidden: true + description: > + admin user password for the Grafana monitoring interface + + api_public_address: + type: string + description: Public IP address of the Kubernetes master server. + default: "" + + api_private_address: + type: string + description: Private IP address of the Kubernetes master server. + default: "" + + fixed_network: + type: string + description: Network from which to allocate fixed addresses. + + fixed_network_name: + type: string + description: Network from which to allocate fixed addresses. + + fixed_subnet: + type: string + description: Subnet from which to allocate fixed addresses. 
+ + network_driver: + type: string + description: network driver to use for instantiating container networks + + secgroup_kube_master_id: + type: string + description: ID of the security group for kubernetes master. + + api_pool_id: + type: string + description: ID of the load balancer pool of k8s API server. + + etcd_pool_id: + type: string + description: ID of the load balancer pool of etcd server. + + auth_url: + type: string + description: > + url for kubernetes to authenticate + + username: + type: string + description: > + user account + + password: + type: string + description: > + user password + + http_proxy: + type: string + description: http proxy address for docker + + https_proxy: + type: string + description: https proxy address for docker + + no_proxy: + type: string + description: no proxies for docker + + kube_tag: + type: string + description: tag of the k8s containers used to provision the kubernetes cluster + + cloud_provider_tag: + type: string + description: + tag of the kubernetes/cloud-provider-openstack + https://hub.docker.com/r/k8scloudprovider/openstack-cloud-controller-manager/tags/ + + cloud_provider_enabled: + type: boolean + description: Enable or disable the openstack kubernetes cloud provider + + etcd_tag: + type: string + description: tag of the etcd system container + + coredns_tag: + type: string + description: tag of the coredns container + + flannel_tag: + type: string + description: tag of the flannel system containers + + flannel_cni_tag: + type: string + description: tag of the flannel cni container + + kube_version: + type: string + description: version of kubernetes used for kubernetes cluster + + kube_dashboard_version: + type: string + description: version of kubernetes dashboard used for kubernetes cluster + + trustee_user_id: + type: string + description: user id of the trustee + + trustee_password: + type: string + description: password of the trustee + hidden: true + + trust_id: + type: string + description: id of 
the trust which is used by the trustee + hidden: true + + insecure_registry_url: + type: string + description: insecure registry url + + container_infra_prefix: + type: string + description: > + prefix of container images used in the cluster, kubernetes components, + kubernetes-dashboard, coredns etc + + etcd_lb_vip: + type: string + description: > + etcd lb vip private used to generate certs on master. + default: "" + + dns_service_ip: + type: string + description: > + address used by Kubernetes DNS service + + dns_cluster_domain: + type: string + description: > + domain name for cluster DNS + + openstack_ca: + type: string + description: The OpenStack CA certificate to install on the node. + + nodes_server_group_id: + type: string + description: ID of the server group for kubernetes cluster nodes. + + availability_zone: + type: string + description: > + availability zone for master and nodes + default: "" + + ca_key: + type: string + description: key of internal ca for the kube certificate api manager + hidden: true + + cert_manager_api: + type: boolean + description: true if the kubernetes cert api manager should be enabled + default: false + + calico_tag: + type: string + description: tag of the calico containers used to provision the calico node + + calico_kube_controllers_tag: + type: string + description: tag of the kube_controllers used to provision the calico node + + calico_ipv4pool: + type: string + description: Configure the IP pool from which Pod IPs will be chosen + + pods_network_cidr: + type: string + description: Configure the IP pool/range from which pod IPs will be chosen + + ingress_controller: + type: string + description: > + ingress controller backend to use + + ingress_controller_role: + type: string + description: > + node role where the ingress controller should run + + octavia_ingress_controller_tag: + type: string + description: Octavia ingress controller docker image tag. 
+ + kubelet_options: + type: string + description: > + additional options to be passed to the kubelet + + kubeapi_options: + type: string + description: > + additional options to be passed to the api + + kubecontroller_options: + type: string + description: > + additional options to be passed to the controller manager + + kubeproxy_options: + type: string + description: > + additional options to be passed to the kube proxy + + kubescheduler_options: + type: string + description: > + additional options to be passed to the scheduler + + octavia_enabled: + type: boolean + description: > + whether or not to use Octavia for LoadBalancer type service. + default: False + + kube_service_account_key: + type: string + hidden: true + description: > + The signed cert will be used to verify the k8s service account tokens + during authentication. + + kube_service_account_private_key: + type: string + hidden: true + description: > + The private key will be used to sign generated k8s service account + tokens. + + prometheus_tag: + type: string + description: tag of prometheus container + + grafana_tag: + type: string + description: tag of grafana container + + heat_container_agent_tag: + type: string + description: tag of the heat_container_agent system container + + keystone_auth_enabled: + type: boolean + description: > + true if the keystone authN and authZ should be enabled + default: + false + + k8s_keystone_auth_tag: + type: string + description: tag of the k8s_keystone_auth container + + monitoring_enabled: + type: boolean + description: Enable or disable prometheus-operator monitoring solution. + default: false + + prometheus_operator_chart_tag: + type: string + description: The stable/prometheus-operator chart version to use. 
+ default: 5.12.3 + + project_id: + type: string + description: > + project id of current project + + tiller_enabled: + type: string + description: Whether to enable tiller or not + + tiller_tag: + type: string + description: tag of tiller container + + tiller_namespace: + type: string + description: namespace where tiller will be installed + + auto_healing_enabled: + type: boolean + description: > + true if the auto healing feature should be enabled + + auto_healing_controller: + type: string + description: > + The service to be deployed for auto-healing. + default: "draino" + + magnum_auto_healer_tag: + type: string + description: tag of the magnum-auto-healer service. + default: "v1.15.0" + + auto_scaling_enabled: + type: boolean + description: > + true if the auto scaling feature should be enabled + + node_problem_detector_tag: + type: string + description: tag of the node problem detector container + + nginx_ingress_controller_tag: + type: string + description: nginx ingress controller docker image tag + + draino_tag: + type: string + description: tag of the draino container + + autoscaler_tag: + type: string + description: tag of the autoscaler container + + min_node_count: + type: number + description: > + minimum node count of cluster workers when doing scale down + + max_node_count: + type: number + description: > + maximum node count of cluster workers when doing scale up + + npd_enabled: + type: boolean + description: > + true if the npd service should be launched + default: + true + +conditions: + + image_based: {equals: [{get_param: boot_volume_size}, 0]} + volume_based: + not: + equals: + - get_param: boot_volume_size + - 0 + +resources: + ###################################################################### + # + # resource that exposes the IPs of either the kube master or the API + # LBaaS pool depending on whether LBaaS is enabled for the cluster. 
+ # + + api_address_switch: + type: Magnum::ApiGatewaySwitcher + properties: + pool_public_ip: {get_param: api_public_address} + pool_private_ip: {get_param: api_private_address} + master_public_ip: {get_attr: [kube_master_floating, floating_ip_address]} + master_private_ip: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} + + ###################################################################### + # + # software configs. these are components that are combined into + # a multipart MIME user-data archive. + # + + agent_config: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + list_join: + - "\n" + - + - str_replace: + template: {get_file: user_data.json} + params: + $HOSTNAME: {get_param: name} + $SSH_KEY_VALUE: {get_param: ssh_public_key} + $OPENSTACK_CA: {get_param: openstack_ca} + + master_config: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - "\n" + - + - str_replace: + template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params-master.sh} + params: + "$INSTANCE_NAME": {get_param: name} + "$PROMETHEUS_MONITORING": {get_param: prometheus_monitoring} + "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]} + "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]} + "$KUBE_API_PORT": {get_param: kubernetes_port} + "$KUBE_NODE_PUBLIC_IP": {get_attr: [kube_master_floating, floating_ip_address]} + "$KUBE_NODE_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} + "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} + "$ETCD_VOLUME": {get_resource: etcd_volume} + "$ETCD_VOLUME_SIZE": {get_param: etcd_volume_size} + "$DOCKER_VOLUME": {get_resource: docker_volume} + "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size} + "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} + "$CGROUP_DRIVER": {get_param: cgroup_driver} + "$NETWORK_DRIVER": {get_param: network_driver} + "$FLANNEL_NETWORK_CIDR": {get_param: 
flannel_network_cidr} + "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} + "$FLANNEL_BACKEND": {get_param: flannel_backend} + "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay} + "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout} + "$PODS_NETWORK_CIDR": {get_param: pods_network_cidr} + "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} + "$ADMISSION_CONTROL_LIST": {get_param: admission_control_list} + "$ETCD_DISCOVERY_URL": {get_param: discovery_url} + "$AUTH_URL": {get_param: auth_url} + "$USERNAME": {get_param: username} + "$PASSWORD": {get_param: password} + "$CLUSTER_NETWORK": {get_param: fixed_network} + "$CLUSTER_NETWORK_NAME": {get_param: fixed_network_name} + "$CLUSTER_SUBNET": {get_param: fixed_subnet} + "$TLS_DISABLED": {get_param: tls_disabled} + "$TRAEFIK_INGRESS_CONTROLLER_TAG": {get_param: traefik_ingress_controller_tag} + "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled} + "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: influx_grafana_dashboard_enabled} + "$VERIFY_CA": {get_param: verify_ca} + "$CLUSTER_UUID": {get_param: cluster_uuid} + "$MAGNUM_URL": {get_param: magnum_url} + "$VOLUME_DRIVER": {get_param: volume_driver} + "$REGION_NAME": {get_param: region_name} + "$HTTP_PROXY": {get_param: http_proxy} + "$HTTPS_PROXY": {get_param: https_proxy} + "$NO_PROXY": {get_param: no_proxy} + "$KUBE_TAG": {get_param: kube_tag} + "$CLOUD_PROVIDER_TAG": {get_param: cloud_provider_tag} + "$CLOUD_PROVIDER_ENABLED": {get_param: cloud_provider_enabled} + "$ETCD_TAG": {get_param: etcd_tag} + "$COREDNS_TAG": {get_param: coredns_tag} + "$FLANNEL_TAG": {get_param: flannel_tag} + "$FLANNEL_CNI_TAG": {get_param: flannel_cni_tag} + "$KUBE_VERSION": {get_param: kube_version} + "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version} + "$TRUSTEE_USER_ID": {get_param: trustee_user_id} + "$TRUSTEE_PASSWORD": {get_param: trustee_password} + "$TRUST_ID": {get_param: trust_id} + "$INSECURE_REGISTRY_URL": 
{get_param: insecure_registry_url} + "$CONTAINER_INFRA_PREFIX": {get_param: container_infra_prefix} + "$ETCD_LB_VIP": {get_param: etcd_lb_vip} + "$DNS_SERVICE_IP": {get_param: dns_service_ip} + "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain} + "$CERT_MANAGER_API": {get_param: cert_manager_api} + "$CA_KEY": {get_param: ca_key} + "$CALICO_TAG": {get_param: calico_tag} + "$CALICO_KUBE_CONTROLLERS_TAG": {get_param: calico_kube_controllers_tag} + "$CALICO_IPV4POOL": {get_param: calico_ipv4pool} + "$INGRESS_CONTROLLER": {get_param: ingress_controller} + "$INGRESS_CONTROLLER_ROLE": {get_param: ingress_controller_role} + "$OCTAVIA_INGRESS_CONTROLLER_TAG": {get_param: octavia_ingress_controller_tag} + "$KUBELET_OPTIONS": {get_param: kubelet_options} + "$KUBEAPI_OPTIONS": {get_param: kubeapi_options} + "$KUBECONTROLLER_OPTIONS": {get_param: kubecontroller_options} + "$KUBEPROXY_OPTIONS": {get_param: kubeproxy_options} + "$KUBESCHEDULER_OPTIONS": {get_param: kubescheduler_options} + "$OCTAVIA_ENABLED": {get_param: octavia_enabled} + "$KUBE_SERVICE_ACCOUNT_KEY": {get_param: kube_service_account_key} + "$KUBE_SERVICE_ACCOUNT_PRIVATE_KEY": {get_param: kube_service_account_private_key} + "$PROMETHEUS_TAG": {get_param: prometheus_tag} + "$GRAFANA_TAG": {get_param: grafana_tag} + "$HEAT_CONTAINER_AGENT_TAG": {get_param: heat_container_agent_tag} + "$KEYSTONE_AUTH_ENABLED": {get_param: keystone_auth_enabled} + "$K8S_KEYSTONE_AUTH_TAG": {get_param: k8s_keystone_auth_tag} + "$MONITORING_ENABLED": {get_param: monitoring_enabled} + "$PROMETHEUS_OPERATOR_CHART_TAG": {get_param: prometheus_operator_chart_tag} + "$PROJECT_ID": {get_param: project_id} + "$EXTERNAL_NETWORK_ID": {get_param: external_network} + "$TILLER_ENABLED": {get_param: tiller_enabled} + "$TILLER_TAG": {get_param: tiller_tag} + "$TILLER_NAMESPACE": {get_param: tiller_namespace} + "$NODE_PROBLEM_DETECTOR_TAG": {get_param: node_problem_detector_tag} + "$NGINX_INGRESS_CONTROLLER_TAG": {get_param: 
nginx_ingress_controller_tag} + "$AUTO_HEALING_ENABLED": {get_param: auto_healing_enabled} + "$AUTO_HEALING_CONTROLLER": {get_param: auto_healing_controller} + "$MAGNUM_AUTO_HEALER_TAG": {get_param: magnum_auto_healer_tag} + "$AUTO_SCALING_ENABLED": {get_param: auto_scaling_enabled} + "$DRAINO_TAG": {get_param: draino_tag} + "$AUTOSCALER_TAG": {get_param: autoscaler_tag} + "$MIN_NODE_COUNT": {get_param: min_node_count} + "$MAX_NODE_COUNT": {get_param: max_node_count} + "$NPD_ENABLED": {get_param: npd_enabled} + - get_file: ../../common/templates/kubernetes/fragments/make-cert.sh + - get_file: ../../common/templates/kubernetes/fragments/configure-etcd.sh + - get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh + - get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-master.sh + # TODO add docker_storage_setup + - get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh + - get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh + + master_config_deployment: + type: OS::Heat::SoftwareDeployment + properties: + signal_transport: HEAT_SIGNAL + config: {get_resource: master_config} + server: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]} + actions: ['CREATE'] + + ###################################################################### + # + # a single kubernetes master. 
+ # + + kube_node_volume: + type: OS::Cinder::Volume + condition: volume_based + properties: + image: {get_param: server_image} + size: {get_param: boot_volume_size} + volume_type: {get_param: boot_volume_type} + + # do NOT use "_" (underscore) in the Nova server name + # it creates a mismatch between the generated Nova name and its hostname + # which can lead to weird problems + kube-master: + type: OS::Nova::Server + condition: image_based + properties: + name: {get_param: name} + image: {get_param: server_image} + flavor: {get_param: master_flavor} + user_data_format: SOFTWARE_CONFIG + software_config_transport: POLL_SERVER_HEAT + user_data: {get_resource: agent_config} + networks: + - port: {get_resource: kube_master_eth0} + scheduler_hints: { group: { get_param: nodes_server_group_id }} + availability_zone: {get_param: availability_zone} + + kube-master-bfv: + type: OS::Nova::Server + condition: volume_based + properties: + name: {get_param: name} + flavor: {get_param: master_flavor} + user_data_format: SOFTWARE_CONFIG + software_config_transport: POLL_SERVER_HEAT + user_data: {get_resource: agent_config} + networks: + - port: {get_resource: kube_master_eth0} + scheduler_hints: { group: { get_param: nodes_server_group_id }} + availability_zone: {get_param: availability_zone} + block_device_mapping_v2: + - boot_index: 0 + volume_id: {get_resource: kube_node_volume} + delete_on_termination: true + + kube_master_eth0: + type: OS::Neutron::Port + properties: + network: {get_param: fixed_network} + security_groups: + - {get_param: secgroup_kube_master_id} + fixed_ips: + - subnet: {get_param: fixed_subnet} + allowed_address_pairs: + - ip_address: {get_param: pods_network_cidr} + replacement_policy: AUTO + + kube_master_floating: + type: Magnum::Optional::KubeMaster::Neutron::FloatingIP + properties: + floating_network: {get_param: external_network} + port_id: {get_resource: kube_master_eth0} + depends_on: kube-master + + api_pool_member: + type: 
Magnum::Optional::Neutron::LBaaS::PoolMember + properties: + pool: {get_param: api_pool_id} + address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} + subnet: { get_param: fixed_subnet } + protocol_port: {get_param: kubernetes_port} + + etcd_pool_member: + type: Magnum::Optional::Neutron::LBaaS::PoolMember + properties: + pool: {get_param: etcd_pool_id} + address: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} + subnet: { get_param: fixed_subnet } + protocol_port: 2379 + + ###################################################################### + # + # etcd storage. This allocates a cinder volume and attaches it + # to the master. + # + + etcd_volume: + type: Magnum::Optional::Etcd::Volume + properties: + size: {get_param: etcd_volume_size} + volume_type: {get_param: etcd_volume_type} + + etcd_volume_attach: + type: Magnum::Optional::Etcd::VolumeAttachment + properties: + instance_uuid: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]} + volume_id: {get_resource: etcd_volume} + mountpoint: /dev/vdc + + ###################################################################### + # + # docker storage. This allocates a cinder volume and attaches it + # to the minion. 
+ # + + docker_volume: + type: Magnum::Optional::Cinder::Volume + properties: + size: {get_param: docker_volume_size} + volume_type: {get_param: docker_volume_type} + + docker_volume_attach: + type: Magnum::Optional::Cinder::VolumeAttachment + properties: + instance_uuid: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]} + volume_id: {get_resource: docker_volume} + mountpoint: /dev/vdb + + upgrade_kubernetes: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: kube_tag_input + config: + get_file: ../../common/templates/kubernetes/fragments/upgrade-kubernetes.sh + + upgrade_kubernetes_deployment: + type: OS::Heat::SoftwareDeployment + properties: + signal_transport: HEAT_SIGNAL + config: {get_resource: upgrade_kubernetes} + server: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]} + actions: ['UPDATE'] + input_values: + kube_tag_input: {get_param: kube_tag} + +outputs: + + OS::stack_id: + value: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]} + + kube_master_ip: + value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} + description: > + This is the "private" IP address of the Kubernetes master node. + + kube_master_external_ip: + value: {get_attr: [kube_master_floating, floating_ip_address]} + description: > + This is the "public" IP address of the Kubernetes master node. diff --git a/magnum/drivers/k8s_fedora_coreos_v1/templates/kubeminion.yaml b/magnum/drivers/k8s_fedora_coreos_v1/templates/kubeminion.yaml new file mode 100644 index 0000000000..5bc57dd0e3 --- /dev/null +++ b/magnum/drivers/k8s_fedora_coreos_v1/templates/kubeminion.yaml @@ -0,0 +1,553 @@ +heat_template_version: queens + +description: > + This is a nested stack that defines a single Kubernetes minion, This stack is + included by an AutoScalingGroup resource in the parent template + (kubecluster.yaml). 
+ +parameters: + + name: + type: string + description: server name + + server_image: + type: string + description: glance image used to boot the server + + minion_flavor: + type: string + description: flavor to use when booting the server + + ssh_key_name: + type: string + description: name of ssh key to be provisioned on our server + + ssh_public_key: + type: string + description: name of ssh key to be provisioned on our server + + external_network: + type: string + description: uuid/name of a network to use for floating ip addresses + + kube_allow_priv: + type: string + description: > + whether or not kubernetes should permit privileged containers. + constraints: + - allowed_values: ["true", "false"] + + boot_volume_size: + type: number + description: > + size of the cinder boot volume + + boot_volume_type: + type: string + description: > + type of the cinder boot volume + + docker_volume_size: + type: number + description: > + size of a cinder volume to allocate to docker for container/image + storage + + docker_volume_type: + type: string + description: > + type of a cinder volume to allocate to docker for container/image + storage + + docker_storage_driver: + type: string + description: docker storage driver name + default: "devicemapper" + + cgroup_driver: + type: string + description: > + cgroup driver name that kubelet should use, ideally the same as + the docker cgroup driver. + default: "cgroupfs" + + tls_disabled: + type: boolean + description: whether or not to enable TLS + + verify_ca: + type: boolean + description: whether or not to validate certificate authority + + kubernetes_port: + type: number + description: > + The port which are used by kube-apiserver to provide Kubernetes + service. 
+ + cluster_uuid: + type: string + description: identifier for the cluster this template is generating + + magnum_url: + type: string + description: endpoint to retrieve TLS certs from + + prometheus_monitoring: + type: boolean + description: > + whether or not to have the node-exporter running on the node + + kube_master_ip: + type: string + description: IP address of the Kubernetes master server. + + etcd_server_ip: + type: string + description: IP address of the Etcd server. + + fixed_network: + type: string + description: Network from which to allocate fixed addresses. + + fixed_subnet: + type: string + description: Subnet from which to allocate fixed addresses. + + network_driver: + type: string + description: network driver to use for instantiating container networks + + flannel_network_cidr: + type: string + description: network range for flannel overlay network + + wait_condition_timeout: + type: number + description : > + timeout for the Wait Conditions + + registry_enabled: + type: boolean + description: > + Indicates whether the docker registry is enabled. + + registry_port: + type: number + description: port of registry service + + swift_region: + type: string + description: region of swift service + + registry_container: + type: string + description: > + name of swift container which docker registry stores images in + + registry_insecure: + type: boolean + description: > + indicates whether to skip TLS verification between registry and backend storage + + registry_chunksize: + type: number + description: > + size fo the data segments for the swift dynamic large objects + + secgroup_kube_minion_id: + type: string + description: ID of the security group for kubernetes minion. 
+ + volume_driver: + type: string + description: volume driver to use for container storage + + region_name: + type: string + description: A logically separate section of the cluster + + username: + type: string + description: > + user account + + password: + type: string + description: > + user password, not set in current implementation, only used to + fill in for Kubernetes config file + hidden: true + + http_proxy: + type: string + description: http proxy address for docker + + https_proxy: + type: string + description: https proxy address for docker + + no_proxy: + type: string + description: no proxies for docker + + kube_tag: + type: string + description: tag of the k8s containers used to provision the kubernetes cluster + + kube_version: + type: string + description: version of kubernetes used for kubernetes cluster + + trustee_domain_id: + type: string + description: domain id of the trustee + + trustee_user_id: + type: string + description: user id of the trustee + + trustee_username: + type: string + description: username of the trustee + + trustee_password: + type: string + description: password of the trustee + hidden: true + + trust_id: + type: string + description: id of the trust which is used by the trustee + hidden: true + + auth_url: + type: string + description: > + url for keystone, must be v2 since k8s backend only support v2 + at this point + + insecure_registry_url: + type: string + description: insecure registry url + + container_infra_prefix: + type: string + description: > + prefix of container images used in the cluster, kubernetes components, + kubernetes-dashboard, coredns etc + + dns_service_ip: + type: string + description: > + address used by Kubernetes DNS service + + dns_cluster_domain: + type: string + description: > + domain name for cluster DNS + + openstack_ca: + type: string + description: The OpenStack CA certificate to install on the node. 
+ + nodes_server_group_id: + type: string + description: ID of the server group for kubernetes cluster nodes. + + availability_zone: + type: string + description: > + availability zone for master and nodes + default: "" + + pods_network_cidr: + type: string + description: Configure the IP pool/range from which pod IPs will be chosen + + kubelet_options: + type: string + description: > + additional options to be passed to the kubelet + + kubeproxy_options: + type: string + description: > + additional options to be passed to the kube proxy + + octavia_enabled: + type: boolean + description: > + whether or not to use Octavia for LoadBalancer type service. + default: False + + cloud_provider_enabled: + type: boolean + description: Enable or disable the openstack kubernetes cloud provider + + heat_container_agent_tag: + type: string + description: tag of the heat_container_agent system container + + auto_healing_enabled: + type: boolean + description: > + true if the auto healing feature should be enabled + + auto_healing_controller: + type: string + description: > + The service to be deployed for auto-healing. + default: "draino" + + npd_enabled: + type: boolean + description: > + true if the npd service should be launched + default: + true + +conditions: + + image_based: {equals: [{get_param: boot_volume_size}, 0]} + volume_based: + not: + equals: + - get_param: boot_volume_size + - 0 + +resources: + + agent_config: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + list_join: + - "\n" + - + - str_replace: + template: {get_file: user_data.json} + params: + $HOSTNAME: {get_param: name} + $SSH_KEY_VALUE: {get_param: ssh_public_key} + $OPENSTACK_CA: {get_param: openstack_ca} + + ###################################################################### + # + # software configs. these are components that are combined into + # a multipart MIME user-data archive. 
+ # + + node_config: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - "\n" + - + - str_replace: + template: {get_file: ../../common/templates/kubernetes/fragments/write-heat-params.sh} + params: + $INSTANCE_NAME: {get_param: name} + $PROMETHEUS_MONITORING: {get_param: prometheus_monitoring} + $KUBE_ALLOW_PRIV: {get_param: kube_allow_priv} + $KUBE_MASTER_IP: {get_param: kube_master_ip} + $KUBE_API_PORT: {get_param: kubernetes_port} + $KUBE_NODE_PUBLIC_IP: {get_attr: [kube_minion_floating, floating_ip_address]} + $KUBE_NODE_IP: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} + $ETCD_SERVER_IP: {get_param: etcd_server_ip} + $DOCKER_VOLUME: {get_resource: docker_volume} + $DOCKER_VOLUME_SIZE: {get_param: docker_volume_size} + $DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver} + $CGROUP_DRIVER: {get_param: cgroup_driver} + $NETWORK_DRIVER: {get_param: network_driver} + $REGISTRY_ENABLED: {get_param: registry_enabled} + $REGISTRY_PORT: {get_param: registry_port} + $SWIFT_REGION: {get_param: swift_region} + $REGISTRY_CONTAINER: {get_param: registry_container} + $REGISTRY_INSECURE: {get_param: registry_insecure} + $REGISTRY_CHUNKSIZE: {get_param: registry_chunksize} + $TLS_DISABLED: {get_param: tls_disabled} + $VERIFY_CA: {get_param: verify_ca} + $CLUSTER_UUID: {get_param: cluster_uuid} + $MAGNUM_URL: {get_param: magnum_url} + $USERNAME: {get_param: username} + $PASSWORD: {get_param: password} + $VOLUME_DRIVER: {get_param: volume_driver} + $REGION_NAME: {get_param: region_name} + $HTTP_PROXY: {get_param: http_proxy} + $HTTPS_PROXY: {get_param: https_proxy} + $NO_PROXY: {get_param: no_proxy} + $KUBE_TAG: {get_param: kube_tag} + $FLANNEL_NETWORK_CIDR: {get_param: flannel_network_cidr} + $PODS_NETWORK_CIDR: {get_param: pods_network_cidr} + $KUBE_VERSION: {get_param: kube_version} + $TRUSTEE_USER_ID: {get_param: trustee_user_id} + $TRUSTEE_PASSWORD: {get_param: trustee_password} + $TRUST_ID: {get_param: trust_id} + 
$AUTH_URL: {get_param: auth_url} + $CLOUD_PROVIDER_ENABLED: {get_param: cloud_provider_enabled} + $INSECURE_REGISTRY_URL: {get_param: insecure_registry_url} + $CONTAINER_INFRA_PREFIX: {get_param: container_infra_prefix} + $DNS_SERVICE_IP: {get_param: dns_service_ip} + $DNS_CLUSTER_DOMAIN: {get_param: dns_cluster_domain} + $KUBELET_OPTIONS: {get_param: kubelet_options} + $KUBEPROXY_OPTIONS: {get_param: kubeproxy_options} + $OCTAVIA_ENABLED: {get_param: octavia_enabled} + $HEAT_CONTAINER_AGENT_TAG: {get_param: heat_container_agent_tag} + $AUTO_HEALING_ENABLED: {get_param: auto_healing_enabled} + $AUTO_HEALING_CONTROLLER: {get_param: auto_healing_controller} + $NPD_ENABLED: {get_param: npd_enabled} + - get_file: ../../common/templates/kubernetes/fragments/write-kube-os-config.sh + - get_file: ../../common/templates/kubernetes/fragments/make-cert-client.sh + - get_file: ../../common/templates/fragments/configure-docker-registry.sh + - get_file: ../../common/templates/kubernetes/fragments/configure-kubernetes-minion.sh + - get_file: ../../common/templates/kubernetes/fragments/add-proxy.sh + # TODO add docker_storage_setup + - get_file: ../../common/templates/kubernetes/fragments/enable-services-minion.sh + - get_file: ../../common/templates/fragments/enable-docker-registry.sh + + node_config_deployment: + type: OS::Heat::SoftwareDeployment + properties: + signal_transport: HEAT_SIGNAL + config: {get_resource: node_config} + server: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]} + actions: ['CREATE'] + + ###################################################################### + # + # a single kubernetes minion. 
+ # + + kube_node_volume: + type: OS::Cinder::Volume + condition: volume_based + properties: + image: {get_param: server_image} + size: {get_param: boot_volume_size} + volume_type: {get_param: boot_volume_type} + + # do NOT use "_" (underscore) in the Nova server name + # it creates a mismatch between the generated Nova name and its hostname + # which can lead to weird problems + kube-minion: + condition: image_based + type: OS::Nova::Server + properties: + name: {get_param: name} + flavor: {get_param: minion_flavor} + image: {get_param: server_image} + user_data: {get_resource: agent_config} + user_data_format: SOFTWARE_CONFIG + software_config_transport: POLL_SERVER_HEAT + networks: + - port: {get_resource: kube_minion_eth0} + scheduler_hints: { group: { get_param: nodes_server_group_id }} + availability_zone: {get_param: availability_zone} + + kube-minion-bfv: + condition: volume_based + type: OS::Nova::Server + properties: + name: {get_param: name} + flavor: {get_param: minion_flavor} + user_data: {get_resource: agent_config} + user_data_format: SOFTWARE_CONFIG + software_config_transport: POLL_SERVER_HEAT + networks: + - port: {get_resource: kube_minion_eth0} + scheduler_hints: { group: { get_param: nodes_server_group_id }} + availability_zone: {get_param: availability_zone} + block_device_mapping_v2: + - boot_index: 0 + volume_id: {get_resource: kube_node_volume} + delete_on_termination: true + + kube_minion_eth0: + type: OS::Neutron::Port + properties: + network: {get_param: fixed_network} + security_groups: + - get_param: secgroup_kube_minion_id + fixed_ips: + - subnet: {get_param: fixed_subnet} + allowed_address_pairs: + - ip_address: {get_param: pods_network_cidr} + replacement_policy: AUTO + + kube_minion_floating: + type: Magnum::Optional::KubeMinion::Neutron::FloatingIP + properties: + floating_network: {get_param: external_network} + port_id: {get_resource: kube_minion_eth0} + depends_on: kube-minion + + 
###################################################################### + # + # docker storage. This allocates a cinder volume and attaches it + # to the minion. + # + + docker_volume: + type: Magnum::Optional::Cinder::Volume + properties: + size: {get_param: docker_volume_size} + volume_type: {get_param: docker_volume_type} + + docker_volume_attach: + type: Magnum::Optional::Cinder::VolumeAttachment + properties: + instance_uuid: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]} + volume_id: {get_resource: docker_volume} + mountpoint: /dev/vdb + + upgrade_kubernetes: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: kube_tag_input + config: + get_file: ../../common/templates/kubernetes/fragments/upgrade-kubernetes.sh + + upgrade_kubernetes_deployment: + type: OS::Heat::SoftwareDeployment + properties: + signal_transport: HEAT_SIGNAL + config: {get_resource: upgrade_kubernetes} + server: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]} + actions: ['UPDATE'] + input_values: + kube_tag_input: {get_param: kube_tag} + +outputs: + + kube_minion_ip: + value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} + description: > + This is the "public" IP address of the Kubernetes minion node. + + kube_minion_external_ip: + value: {get_attr: [kube_minion_floating, floating_ip_address]} + description: > + This is the "public" IP address of the Kubernetes minion node. + + ###################################################################### + # + # NOTE(flwang): Returning the minion node server ID here so that + # consumer can send API request to Heat to remove a particular + # node with removal_policies. Otherwise, the consumer (e.g. AutoScaler) + # has to use index to do the remove which is confusing out of the + # OpenStack world. 
+ # https://storyboard.openstack.org/#!/story/2005054 + # + ###################################################################### + + OS::stack_id: + value: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]} + description: > + This is the Nova server id of the node. diff --git a/magnum/drivers/k8s_fedora_coreos_v1/templates/user_data.json b/magnum/drivers/k8s_fedora_coreos_v1/templates/user_data.json new file mode 100644 index 0000000000..8597d11a8c --- /dev/null +++ b/magnum/drivers/k8s_fedora_coreos_v1/templates/user_data.json @@ -0,0 +1,84 @@ +{ + "ignition": { + "config": { + "replace": { + "source": null, + "verification": {} + } + }, + "security": { + "tls": {} + }, + "timeouts": {}, + "version": "3.0.0" + }, + "passwd": { + "users": [ + { + "name": "core", + "sshAuthorizedKeys": [ + "$SSH_KEY_VALUE" + ] + } + ] + }, + "storage": { + "directories":[ + { + "path": "/var/lib/cloud/data", + "group": {"name": "root"}, + "user": {"name": "root"}, + "mode": 493 + }, + { + "path": "/var/lib/heat-cfntools", + "group": {"name": "root"}, + "user": {"name": "root"}, + "mode": 493 + } + ], + "files": [ + { + "filesystem": "root", + "path": "/etc/hostname", + "mode": 420, + "contents": { "source": "data:,$HOSTNAME" } + }, + { + "filesystem": "root", + "group": {"name": "root"}, + "path": "/etc/pki/ca-trust/source/anchors/openstack-ca.pem", + "user": {"name": "root"}, + "contents": { + "source": "data:,$OPENSTACK_CA", + "verification": {} + }, + "mode": 420 + }, + { + "user": {"name": "root"}, + "group": {"name": "root"}, + "mode": 448, + "path": "/root/configure-agent-env.sh", + "contents": { + "source":
"data:,%23!%2Fbin%2Fbash%0A%0Aset%20-x%0Aset%20-e%0Aset%20%2Bu%0A%0Auntil%20%5B%20-f%20%2Fetc%2Fpki%2Fca-trust%2Fsource%2Fanchors%2Fopenstack-ca.pem%20%5D%0Ado%0A%20%20%20%20echo%20%22waiting%20for%20%2Fetc%2Fpki%2Fca-trust%2Fsource%2Fanchors%2Fopenstack-ca.pem%22%0A%20%20%20%20sleep%203s%0Adone%0A%2Fusr%2Fbin%2Fupdate-ca-trust%0A%0AHTTP_PROXY%3D%22%24HTTP_PROXY%22%0AHTTPS_PROXY%3D%22%24HTTPS_PROXY%22%0ANO_PROXY%3D%22%24NO_PROXY%22%0ACONTAINER_INFRA_PREFIX%3D%22%24CONTAINER_INFRA_PREFIX%22%0AHEAT_CONTAINER_AGENT_TAG%3D%22%24HEAT_CONTAINER_AGENT_TAG%22%0A%0A%0Aif%20%5B%20-n%20%22%24%7BHTTP_PROXY%7D%22%20%5D%3B%20then%0A%20%20%20%20export%20HTTP_PROXY%0Afi%0A%0Aif%20%5B%20-n%20%22%24%7BHTTPS_PROXY%7D%22%20%5D%3B%20then%0A%20%20%20%20export%20HTTPS_PROXY%0Afi%0A%0Aif%20%5B%20-n%20%22%24%7BNO_PROXY%7D%22%20%5D%3B%20then%0A%20%20%20%20export%20NO_PROXY%0Afi%0A%0A%23%20Create%20a%20keypair%20for%20the%20heat-container-agent%20to%0A%23%20access%20the%20node%20over%20ssh.%20It%20is%20useful%20to%20operate%0A%23%20in%20host%20mount%20namespace%20and%20apply%20configuration.%0Aid%0Amkdir%20-p%20%2Fsrv%2Fmagnum%2F.ssh%0Achmod%200700%20%2Fsrv%2Fmagnum%2F.ssh%0A%23touch%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0Assh-keygen%20-q%20-t%20rsa%20-N%20''%20-f%20%2Ftmp%2Fheat_agent_rsa%0Amv%20%2Ftmp%2Fheat_agent_rsa%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0Amv%20%2Ftmp%2Fheat_agent_rsa.pub%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa.pub%0Achmod%200400%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0Achmod%200400%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa.pub%0A%23%20Add%20the%20public%20to%20the%20host%20authorized_keys%20file.%0Amkdir%20-p%20%2Froot%2F.ssh%0Achmod%200700%20%2Froot%2F.ssh%0Acat%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa.pub%20%3E%20%2Froot%2F.ssh%2Fauthorized_keys%0A%23%20Add%20localost%20to%20know_hosts%0Assh-keyscan%20127.0.0.1%20%3E%20%2Fsrv%2Fmagnum%2F.ssh%2Fknown_hosts%0A%23%20ssh%20configguration%20file%2C%20to%20be%20specified%20with%20ssh%20-F%0Acat%20%3E%20%2Fsrv%2F
magnum%2F.ssh%2Fconfig%20%3C%3CEOF%0AHost%20localhost%0A%20%20%20%20%20HostName%20127.0.0.1%0A%20%20%20%20%20User%20root%0A%20%20%20%20%20IdentityFile%20%2Fsrv%2Fmagnum%2F.ssh%2Fheat_agent_rsa%0A%20%20%20%20%20UserKnownHostsFile%20%2Fsrv%2Fmagnum%2F.ssh%2Fknown_hosts%0AEOF%0A%0Ased%20-i%20'%2F%5EPermitRootLogin%2F%20s%2F%20.*%2F%20without-password%2F'%20%2Fetc%2Fssh%2Fsshd_config%0A%23%20Security%20enhancement%3A%20Disable%20password%20authentication%0Ased%20-i%20'%2F%5EPasswordAuthentication%20yes%2F%20s%2F%20yes%2F%20no%2F'%20%2Fetc%2Fssh%2Fsshd_config%0A%0Asystemctl%20restart%20sshd%0A", + "verification": {} + } + } + ] + }, + "systemd": { + "units": [ + { + "name": "configure-agent-env.service", + "enabled": true, + "contents": "[Unit]\nDescription=Configure heat agent environment\nAfter=sshd.service\n\n[Service]\nUser=root\nGroup=root\nType=simple\nExecStart=/bin/bash /root/configure-agent-env.sh\nRestart=on-failure\n\n[Install]\nWantedBy=multi-user.target" + }, + { + "name": "heat-container-agent.service", + "enabled": true, + "contents": "[Unit]\nDescription=Run heat-container-agent\nAfter=network-online.target configure-agent-env.service\nWants=network-online.target\n\n[Service]\nExecStartPre=mkdir -p /var/lib/heat-container-agent\nExecStartPre=mkdir -p /var/run/heat-config\nExecStartPre=mkdir -p /var/run/os-collect-config\nExecStartPre=mkdir -p /opt/stack/os-config-refresh\nExecStartPre=-mv /var/lib/os-collect-config/local-data /var/lib/cloud/data/cfn-init-data\nExecStartPre=mkdir -p /srv/magnum\nExecStartPre=-/bin/podman kill heat-container-agent\nExecStartPre=-/bin/podman rm heat-container-agent\nExecStartPre=-/bin/podman pull docker.io/openstackmagnum/heat-container-agent:train-dev\nExecStart=/bin/podman run \\\n --name heat-container-agent \\\n --privileged \\\n --volume /srv/magnum:/srv/magnum \\\n --volume /opt/stack/os-config-refresh:/opt/stack/os-config-refresh \\\n --volume /run/systemd:/run/systemd \\\n --volume /etc/:/etc/ \\\n --volume 
/var/lib:/var/lib \\\n --volume /var/run:/var/run \\\n --volume /var/log:/var/log \\\n --volume /tmp:/tmp \\\n --volume /dev:/dev \\\n --env REQUESTS_CA_BUNDLE=/etc/pki/ca-trust/source/anchors/openstack-ca.pem --net=host \\\n docker.io/openstackmagnum/heat-container-agent:train-dev \\\n /usr/bin/start-heat-container-agent\nTimeoutStartSec=10min\n\nExecStop=/bin/podman stop heat-container-agent\n\n[Install]\nWantedBy=multi-user.target\n" } ] } +} diff --git a/magnum/drivers/k8s_fedora_coreos_v1/version.py b/magnum/drivers/k8s_fedora_coreos_v1/version.py new file mode 100644 index 0000000000..2de5b8fc69 --- /dev/null +++ b/magnum/drivers/k8s_fedora_coreos_v1/version.py @@ -0,0 +1,17 @@ +# Copyright 2016 - Rackspace Hosting +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version = '1.0.0' +driver = 'k8s_fedora_coreos_v1' +container_version = '1.12.6' diff --git a/releasenotes/notes/fedora_coreos-e66b44d86dea380f.yaml b/releasenotes/notes/fedora_coreos-e66b44d86dea380f.yaml new file mode 100644 index 0000000000..01f9469cb5 --- /dev/null +++ b/releasenotes/notes/fedora_coreos-e66b44d86dea380f.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + Add fedora coreos driver. To deploy clusters with fedora coreos, operators + or users need to add os_distro=fedora-coreos to the image. The scripts + to deploy kubernetes on top are the same with fedora atomic. Note that + this driver has selinux enabled.
+issues: + - | + The startup of the heat-container-agent uses a workaround to copy the + SoftwareDeployment credentials to /var/lib/cloud/data/cfn-init-data. + The fedora coreos driver requires heat train to support ignition. diff --git a/setup.cfg b/setup.cfg index 27e715ea10..2eb83d296f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -61,6 +61,7 @@ oslo.policy.policies = magnum.drivers = k8s_fedora_atomic_v1 = magnum.drivers.k8s_fedora_atomic_v1.driver:Driver + k8s_fedora_coreos_v1 = magnum.drivers.k8s_fedora_coreos_v1.driver:Driver k8s_coreos_v1 = magnum.drivers.k8s_coreos_v1.driver:Driver swarm_fedora_atomic_v1 = magnum.drivers.swarm_fedora_atomic_v1.driver:Driver swarm_fedora_atomic_v2 = magnum.drivers.swarm_fedora_atomic_v2.driver:Driver