65a3a8245b

There were some recent changes to Minikube that added a hard dependency on that file being present, which were not caught by upstream testing since upstream doesn't test with the "none" VM driver. This adds the file to work around the issue so that the cluster bootstrap can complete.

Change-Id: I3869350ae620721ea4e755e50facd0291a7772d3
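# Variables referenced below: minikube_version, kubernetes_runtime,
# minikube_dns_resolvers, ensure_kubernetes_minikube_addons. They are
# presumably supplied by the role's defaults or the calling job; the
# names are taken from their usage in this file.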
- name: Check for Minikube install
  stat:
    path: /tmp/minikube
  register: stat_result

- name: Download Minikube
  get_url:
    url: https://storage.googleapis.com/minikube/releases/{{ minikube_version }}/minikube-linux-amd64
    dest: /tmp/minikube
    mode: 0755
  when: not stat_result.stat.exists
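# The stat/get_url pair above keeps the download idempotent: the binary
# is only fetched when /tmp/minikube is not already present.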
- name: Run ensure-docker role
  include_role:
    name: ensure-docker

# Ubuntu Focal doesn't have cri-o-1.15 packages, so per-distro task
# files are required to install cri-o.
- name: Install crio
  when: kubernetes_runtime == 'cri-o'
  include_tasks: "{{ zj_distro_os }}"
  with_first_found:
    - "crio-{{ ansible_distribution }}-{{ ansible_distribution_version }}.yaml"
    - "crio-default.yaml"
  loop_control:
    loop_var: zj_distro_os
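# with_first_found selects the most specific task file that exists,
# e.g. crio-Ubuntu-20.04.yaml on Focal, and falls back to
# crio-default.yaml on any distribution without a dedicated file.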
- name: Workaround missing 02-crio.conf
  # See: https://github.com/kubernetes/minikube/issues/13816
  when: kubernetes_runtime == 'cri-o'
  block:
    - name: Add missing crio.conf.d folder
      file:
        path: /etc/crio/crio.conf.d
        state: directory
        mode: 0755
      become: true

    - name: Fix missing 02-crio.conf
      copy:
        content: |
          [crio.image]
          # pause_image = ""

          [crio.network]
          # cni_default_network = ""

          [crio.runtime]
          # cgroup_manager = ""
        dest: /etc/crio/crio.conf.d/02-crio.conf
        mode: 0644
      become: true
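# Per the linked issue, Minikube only requires that 02-crio.conf exist;
# the file written above contains nothing but commented-out settings,
# so it satisfies the dependency without overriding any cri-o defaults.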
- name: Create .kube directory
  file:
    path: "{{ ansible_user_dir }}/.kube"
    state: directory
    mode: 0755

- name: Create .kube/config file
  file:
    path: "{{ ansible_user_dir }}/.kube/config"
    state: touch
    mode: 0644

- name: Create .minikube directory
  file:
    path: "{{ ansible_user_dir }}/.minikube"
    state: directory
    mode: 0755
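# The directories and empty config file above match the MINIKUBE_HOME
# and KUBECONFIG environment variables exported to the "Start Minikube"
# task below, so minikube keeps its state under the unprivileged
# user's home directory.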
- name: Default args
  set_fact:
    extra_args: ""

- name: Configure dns options if set
  block:
    - name: Write resolv.conf
      template:
        src: resolv.conf.j2
        dest: "{{ ansible_user_dir }}/.minikube/k8s_resolv.conf"
        mode: "0444"

    - name: Set extra kube settings
      set_fact:
        extra_args: "--extra-config=kubelet.resolv-conf={{ ansible_user_dir }}/.minikube/k8s_resolv.conf"
  when: minikube_dns_resolvers|length>0
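# The resolv.conf.j2 template ships with the role and is not shown
# here. A minimal sketch, assuming it simply emits one nameserver line
# per configured resolver (the loop variable name is hypothetical):
#   {% for zj_resolver in minikube_dns_resolvers %}
#   nameserver {{ zj_resolver }}
#   {% endfor %}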
- name: Start Minikube
  become: yes
  command: >-
    /tmp/minikube start
    --v=7
    --vm-driver=none
    --container-runtime={{ kubernetes_runtime }}
    {{ extra_args }}
    {% for _addon in ensure_kubernetes_minikube_addons %}
    --addons={{ _addon }}
    {% endfor %}
  environment:
    MINIKUBE_WANTUPDATENOTIFICATION: false
    MINIKUBE_WANTREPORTERRORPROMPT: false
    MINIKUBE_WANTNONEDRIVERWARNING: false
    MINIKUBE_WANTKUBECTLDOWNLOADMSG: false
    CHANGE_MINIKUBE_NONE_USER: true
    MINIKUBE_HOME: "{{ ansible_user_dir }}"
    KUBECONFIG: "{{ ansible_user_dir }}/.kube/config"
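# The MINIKUBE_WANT* variables suppress interactive prompts and
# warnings so the command can run unattended in CI, and
# CHANGE_MINIKUBE_NONE_USER tells minikube to chown its files to the
# invoking user when running with the "none" driver under sudo.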
- name: Get KUBECONFIG
  command: "kubectl config view"
  register: kubeconfig_yaml

- name: Parse KUBECONFIG YAML
  set_fact:
    kube_config: "{{ kubeconfig_yaml.stdout | from_yaml }}"
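# "kubectl config view" prints the effective kubeconfig as YAML;
# from_yaml parses it into a dict so the per-user client-key paths can
# be iterated in the ownership fix below.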
- name: Ensure minikube config is owned by ansible_user
  become: yes
  loop: "{{ kube_config['users'] }}"
  loop_control:
    loop_var: zj_item
  file:
    path: "{{ zj_item['user']['client-key'] }}"
    owner: "{{ ansible_user }}"
- name: Get cluster info
  command: kubectl cluster-info
- name: Concatenate the dns resolvers
  # This is a hack to work around a temporary problem with the
  # resolv.conf auto-detection in minikube v1.10.x. Zuul uses unbound
  # as a DNS cache, so the systemd resolv.conf points at localhost. To
  # avoid the coreDNS forwarding loop, we specify nameservers
  # explicitly and override them for minikube. However, the new
  # minikube version always appends the systemd resolv.conf, which
  # reintroduces the coreDNS loop.
  set_fact:
    dns_resolvers: "{{ minikube_dns_resolvers | join(' ') }}"
  when: minikube_dns_resolvers|length>0
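# coreDNS runs its "loop" plugin (present in the Corefile below): if
# the forward target resolves back to coreDNS itself, as a localhost
# resolver would, the pod crash-loops. Forwarding directly to the
# concatenated resolvers avoids that.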
- name: Patch coreDNS corefile with the specified dns resolvers
  command: |
    kubectl patch cm coredns -n kube-system --patch="
    data:
      Corefile: |
        .:53 {
            errors
            health {
                lameduck 5s
            }
            ready
            kubernetes cluster.local in-addr.arpa ip6.arpa {
                pods insecure
                fallthrough in-addr.arpa ip6.arpa
                ttl 30
            }
            prometheus :9153
            forward . {{ dns_resolvers }}
            cache 30
            loop
            reload
            loadbalance
        }"
  when: minikube_dns_resolvers|length>0
- name: Rollout coreDNS deployment
  command: |
    kubectl rollout restart deploy/coredns -n kube-system
  when: minikube_dns_resolvers|length>0
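# "kubectl rollout restart" recreates the coredns pods so they pick up
# the patched ConfigMap immediately; the "reload" plugin in the
# Corefile would also notice the change eventually, but a restart
# applies it right away.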