ansible-playbooks/playbookconfig/src/playbooks/roles/bootstrap/bringup-essential-services/tasks/main.yml

---
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# ROLE DESCRIPTION:
#   This role brings up Kubernetes and the essential flock services required
#   for initial controller unlock.
#
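# The block below runs only on the first play through, or when services need
# to be restarted; see the 'when' condition at the end of the block.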
- block:
  - name: Add loopback interface
    # Use shell instead of command module as source is an internal shell command
    shell: "{{ item }}"
    register: add_addresses
    failed_when: false
    with_items:
      - source /etc/platform/openrc; system host-if-add controller-0 lo virtual none lo -c platform -m 1500
      - source /etc/platform/openrc; system interface-network-assign controller-0 lo mgmt
      - source /etc/platform/openrc; system interface-network-assign controller-0 lo cluster-host
      - ip addr add {{ cluster_virtual }} brd {{ cluster_broadcast }} dev lo scope host label lo:5
      - ip addr add {{ mgmt_virtual }} brd {{ management_broadcast }} dev lo scope host label lo:1
      - ip addr add {{ pxe_virtual }} dev lo scope host
      - ip addr add {{ cluster_floating_virtual }} dev lo scope host
      - ip addr add {{ mgmt_nfs_1_virtual }} dev lo scope host
      - ip addr add {{ mgmt_nfs_2_virtual }} dev lo scope host
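
  # failed_when is disabled above because on a bootstrap replay these commands
  # fail harmlessly (the interface, network assignments and addresses already
  # exist); genuine failures are surfaced by the vetting task below.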

  - name: Fail if adding interface addresses failed for reason other than it has been done before
    fail:
      msg: "{{ item.item }} failed for reason: {{ item.stderr }}."
    when: item.rc != 0 and not incomplete_bootstrap
    with_items: "{{ add_addresses.results }}"

  - name: Remove previous management floating address if management network config has changed
    command: ip addr delete {{ prev_mgmt_floating_virtual }} dev lo scope host
    when: last_config_file_exists and reconfigure_endpoints and
          (mgmt_floating_virtual != prev_mgmt_floating_virtual)

  - name: Refresh local DNS (i.e. /etc/hosts)
    include: refresh_local_dns.yml

  - name: Load images from archives if configured
    include: load_images_from_archive.yml
    when: images_archive_exists

  - name: Bring up Kubernetes master
    include: bringup_kubemaster.yml

  - name: Bring up Helm
    include: bringup_helm.yml

  - name: Set up controller registry certificate and keys
    include: setup_registry_certificate_and_keys.yml

  - name: Bring up essential flock services
    include: bringup_flock_services.yml
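
  # Note: bare 'include' is deprecated as of Ansible 2.4; 'include_tasks' is
  # the modern equivalent for dynamic task file inclusion.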

  - name: Set dnsmasq.leases flag for unlock
    file:
      path: "{{ config_permdir }}/dnsmasq.leases"
      state: touch
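
  # 'state: touch' creates an empty leases file (or refreshes its timestamp);
  # its presence is presumably checked as part of the unlock sequence.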

  - name: Update resolv.conf file for unlock
    lineinfile:
      path: /etc/resolv.conf
      line: "nameserver {{ controller_floating_address }}"
      insertbefore: BOF
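
  # insertbefore: BOF puts the controller floating address at the top of
  # /etc/resolv.conf so it is consulted before any upstream nameservers.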

  - name: Check for controller-0 online status
    shell: source /etc/platform/openrc; system host-list | grep online
    register: online_check
    until: online_check.rc == 0
    retries: 10
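
  # 'until' reruns the check until grep succeeds; with no explicit delay,
  # Ansible waits the default 5 seconds between each of the 10 retries.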

  - name: Wait for {{ pods_wait_time }} seconds to ensure kube-system pods are all started
    wait_for:
      timeout: "{{ pods_wait_time }}"
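
  # wait_for with only a timeout (no host/port/path) is a plain sleep for
  # pods_wait_time seconds.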

  - name: Start parallel tasks to wait for Kubernetes component, Networking and Tiller pods to reach ready state
    command: >-
      kubectl --kubeconfig=/etc/kubernetes/admin.conf wait --namespace=kube-system
      --for=condition=Ready pods --selector {{ item }} --timeout=30s
    async: 30
    poll: 0
    with_items:
      - k8s-app=calico-node
      - k8s-app=calico-kube-controllers
      - k8s-app=kube-proxy
      - app=multus
      - app=sriov-cni
      - app=helm
      - component=kube-apiserver
      - component=kube-controller-manager
      - component=kube-scheduler
    register: wait_for_pods
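
  # 'async: 30' with 'poll: 0' fires each 'kubectl wait' as a background job
  # (fire-and-forget); the job ids captured in wait_for_pods are polled by
  # the async_status task below.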

  - name: Get wait tasks results
    async_status:
      jid: "{{ item.ansible_job_id }}"
    register: wait_job_result
    until: wait_job_result.finished
    # Set the retry to 10 times (60 seconds) but the async jobs above will
    # complete (success or failure) within 30 seconds
    retries: 10
    with_items: "{{ wait_for_pods.results }}"

  - name: Fail if any of the Kubernetes component, Networking and Tiller pods is not ready by this time
    fail:
      msg: "Pod {{ item._ansible_item_label._ansible_item_label }} is still not ready."
    when: item.stdout is not search(" condition met")
    with_items: "{{ wait_job_result.results }}"

  # Have to check for kube-dns pods separately as at most only one is
  # running at this point so checking for "Ready" condition at kube-dns
  # app level won't work
  - name: Fail if no kube-dns pod is running
    shell: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pods --namespace=kube-system | grep coredns | grep Running
    register: dns_pod_result
    failed_when: dns_pod_result.rc != 0

  when: (not replayed) or (restart_services)

- name: Remove config file from previous play
  file:
    path: "{{ last_bootstrap_config_file }}"
    state: absent

- name: Save the current system and network config for reference in subsequent replays
  lineinfile:
    # This file should be cleared upon host reboot
    path: "{{ last_bootstrap_config_file }}"
    line: "{{ item }}"
    create: yes
  with_items:
    - "prev_system_mode: {{ system_mode }}"
    - "prev_timezone: {{ timezone }}"
    - "prev_distributed_cloud_role: {{ distributed_cloud_role }}"
    - "prev_management_dynamic_address_allocation: {{ management_dynamic_address_allocation }}"
    - "prev_cluster_host_dynamic_address_allocation: {{ cluster_host_dynamic_address_allocation }}"
    - "prev_pxeboot_subnet: {{ pxeboot_subnet }}"
    - "prev_management_subnet: {{ management_subnet }}"
    - "prev_cluster_host_subnet: {{ cluster_host_subnet }}"
    - "prev_cluster_pod_subnet: {{ cluster_pod_subnet }}"
    - "prev_cluster_service_subnet: {{ cluster_service_subnet }}"
    - "prev_external_oam_subnet: {{ external_oam_subnet }}"
    - "prev_external_oam_gateway_address: {{ external_oam_gateway_address }}"
    - "prev_external_oam_floating_address: {{ external_oam_floating_address }}"
    - "prev_management_multicast_subnet: {{ management_multicast_subnet }}"
    - "prev_dns_servers: {{ dns_servers | join(',') }}"
    - "prev_docker_http_proxy: {{ docker_http_proxy }}"
    - "prev_docker_https_proxy: {{ docker_https_proxy }}"
    - "prev_docker_no_proxy: {{ docker_no_proxy | sort | join(',') }}"
    # Store the addresses as the values determined in the prepare-env stage,
    # not as the merged values from the validate-config stage, since the
    # latter requires subnet validation.
    - "prev_pxeboot_start_address: {{ pxeboot_start_address }}"
    - "prev_pxeboot_end_address: {{ pxeboot_end_address }}"
    - "prev_management_start_address: {{ management_start_address }}"
    - "prev_management_end_address: {{ management_end_address }}"
    - "prev_cluster_host_start_address: {{ cluster_host_start_address }}"
    - "prev_cluster_host_end_address: {{ cluster_host_end_address }}"
    - "prev_cluster_pod_start_address: {{ cluster_pod_start_address }}"
    - "prev_cluster_pod_end_address: {{ cluster_pod_end_address }}"
    - "prev_cluster_service_start_address: {{ cluster_service_start_address }}"
    - "prev_cluster_service_end_address: {{ cluster_service_end_address }}"
    - "prev_external_oam_start_address: {{ external_oam_start_address }}"
    - "prev_external_oam_end_address: {{ external_oam_end_address }}"
    - "prev_management_multicast_start_address: {{ management_multicast_start_address }}"
    - "prev_management_multicast_end_address: {{ management_multicast_end_address }}"
    - "prev_external_oam_node_0_address: {{ external_oam_node_0_address }}"
    - "prev_external_oam_node_1_address: {{ external_oam_node_1_address }}"
    - "prev_apiserver_cert_sans: {{ apiserver_cert_sans | to_yaml }}"
    - "prev_k8s_root_ca_cert: {{ k8s_root_ca_cert }}"
    - "prev_k8s_root_ca_key: {{ k8s_root_ca_key }}"
    - "prev_apiserver_oidc: {{ apiserver_oidc | to_yaml }}"
    # Nested dictionaries are picky about having things on the same line
    - "prev_docker_registries: "
    - "{{ docker_registries | to_yaml }}"

- name: Mark the bootstrap as completed
  file:
    path: "{{ bootstrap_completed_flag }}"
    state: touch
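
# The flag file presumably feeds the 'replayed' condition above: when it
# exists on a later run, the bring-up block is skipped unless
# restart_services is set.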