Gate: update Ansible conventions

Using tests as filters is deprecated and will be removed in Ansible 2.9. Switch the gate playbooks from the filter syntax (result | failed, result | succeeded) to the test syntax (result is failed, result is succeeded).

Change-Id: I2bc31177cdb3d59319c4cb04f77db573f3217479
Pete Birley 2018-04-18 23:32:35 -05:00
parent d93649da5f
commit 5750d2a01f
6 changed files with 22 additions and 22 deletions
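
For readers unfamiliar with the convention, the change swaps Jinja2 filter syntax for test syntax when inspecting registered results. A minimal sketch of the before/after pattern follows; the task and the register name "result" are hypothetical and not part of this commit:

# Illustrative only: "result" is a hypothetical register, not a task from this change.
- name: example task whose outcome we want to check
  command: /bin/true
  register: result
  ignore_errors: True

# Deprecated filter form, removed in Ansible 2.9:
- name: react to a failure (old syntax)
  when: result | failed
  debug:
    msg: "previous task failed"

# Test form applied throughout this change:
- name: react to a failure (new syntax)
  when: result is failed
  debug:
    msg: "previous task failed"

The same substitution applies to the succeeded checks below; only the syntax changes, the behaviour of the tasks does not.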

@@ -20,7 +20,7 @@
   register: need_helm
   ignore_errors: True
 - name: install helm client
-  when: need_helm | failed
+  when: need_helm is failed
   become_user: root
   shell: |
     TMP_DIR=$(mktemp -d)
@@ -42,13 +42,13 @@
   register: helm_server_running
   ignore_errors: True
 - name: getting current host user name
-  when: helm_server_running | failed
+  when: helm_server_running is failed
   shell: id -un
   args:
     executable: /bin/bash
   register: helm_server_user
 - name: moving systemd unit into place for helm server
-  when: helm_server_running | failed
+  when: helm_server_running is failed
   become: yes
   become_user: root
   template:
@@ -56,7 +56,7 @@
     dest: /etc/systemd/system/helm-serve.service
     mode: 0640
 - name: starting helm serve service
-  when: helm_server_running | failed
+  when: helm_server_running is failed
   become: yes
   become_user: root
   systemd:
@@ -80,7 +80,7 @@
   register: helm_stable_repo_present
   ignore_errors: True
 - name: checking if helm 'stable' repo is present
-  when: helm_stable_repo_present | succeeded
+  when: helm_stable_repo_present is succeeded
   command: helm repo remove stable
 - name: adding helm local repo

@@ -18,21 +18,21 @@
   ignore_errors: True
 - name: centos | moving systemd unit into place
-  when: ( ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' ) and ( need_docker | failed )
+  when: ( ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' ) and ( need_docker is failed )
   template:
     src: centos-docker.service.j2
     dest: /etc/systemd/system/docker.service
     mode: 0640
 - name: fedora | moving systemd unit into place
-  when: ( ansible_distribution == 'Fedora' ) and ( need_docker | failed )
+  when: ( ansible_distribution == 'Fedora' ) and ( need_docker is failed )
   template:
     src: fedora-docker.service.j2
     dest: /etc/systemd/system/docker.service
     mode: 0640
 - name: ubuntu | moving systemd unit into place
-  when: ( ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' ) and ( need_docker | failed )
+  when: ( ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' ) and ( need_docker is failed )
   template:
     src: ubuntu-docker.service.j2
     dest: /etc/systemd/system/docker.service
@@ -46,14 +46,14 @@
     state: directory
 - name: proxy | moving proxy systemd unit into place
-  when: ( need_docker | failed ) and ( proxy.http is defined and (proxy.http | trim != "") )
+  when: ( need_docker is failed ) and ( proxy.http is defined and (proxy.http | trim != "") )
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/docker.service.d/http-proxy.conf
     mode: 0640
 - name: deploy docker packages
-  when: need_docker | failed
+  when: need_docker is failed
   include_role:
     name: deploy-package
     tasks_from: dist

@@ -40,17 +40,17 @@
   register: local_overrides
 - name: "try to deploy release {{ chart_def['release'] }} in {{ chart_def['namespace'] }} namespace with {{ chart_def['chart_name'] }} chart"
-  when: check_deployed_result | failed
+  when: check_deployed_result is failed
   command: "helm install {{ work_dir }}/{{ chart_def['chart_name'] }} --namespace {{ chart_def['namespace'] }} --name {{ chart_def['release'] }} --values={{ chart_values_file.path }}{% if local_overrides.stat.exists %} --values {{ work_dir }}/tools/gate/local-overrides/{{ chart_def['release'] }}.yaml{% endif %}"
   register: out
 - name: "display info for the helm {{ chart_def['release'] }} release deploy"
-  when: check_deployed_result | failed
+  when: check_deployed_result is failed
   debug:
     var: out.stdout_lines
 - name: "pre-upgrade, delete jobs for {{ chart_def['release'] }} release"
   when:
-    - check_deployed_result | succeeded
+    - check_deployed_result is succeeded
     - "'upgrade' in chart_def"
     - "'pre' in chart_def['upgrade']"
     - "'delete' in chart_def['upgrade']['pre']"
@@ -60,11 +60,11 @@
     loop_var: helm_upgrade_delete_job
   command: "kubectl delete --namespace {{ chart_def['namespace'] }} job -l application={{ helm_upgrade_delete_job.labels.application }},component={{ helm_upgrade_delete_job.labels.component }} --ignore-not-found=true"
 - name: "try to upgrade release {{ chart_def['release'] }} in {{ chart_def['namespace'] }} namespace with {{ chart_def['chart_name'] }} chart"
-  when: check_deployed_result | succeeded
+  when: check_deployed_result is succeeded
   command: "helm upgrade {{ chart_def['release'] }} {{ work_dir }}/{{ chart_def['chart_name'] }} --values={{ chart_values_file.path }}{% if local_overrides.stat.exists %} --values {{ work_dir }}/tools/gate/local-overrides/{{ chart_def['release'] }}.yaml{% endif %}"
   register: out
 - name: "display info for the helm {{ chart_def['release'] }} release upgrade"
-  when: check_deployed_result | succeeded
+  when: check_deployed_result is succeeded
   debug:
     var: out.stdout_lines

@@ -41,7 +41,7 @@
 - name: "gathering logs for helm tests for {{ release }}"
   when:
-    - test_result | succeeded
+    - test_result is succeeded
   shell: |-
     set -e
     kubectl logs {{ release }}-test -n {{ namespace }} >> {{ logs_dir }}/helm-tests/{{ release }}.txt
@@ -51,7 +51,7 @@
 - name: "displaying logs for successful helm tests for {{ release }}"
   when:
-    - test_result | succeeded
+    - test_result is succeeded
     - "'output' in test_settings"
     - "test_settings.output|bool == true"
   debug:

@@ -156,7 +156,7 @@
   register: kube_public_ns_exists
   ignore_errors: True
 - name: create kube-public namespace if required
-  when: kube_public_ns_exists | failed
+  when: kube_public_ns_exists is failed
   command: kubectl --kubeconfig /mnt/rootfs/etc/kubernetes/admin.conf create ns kube-public
 - name: sourcing kube cluster admin credentials
   include_vars: /etc/kubernetes/admin.conf
@@ -181,7 +181,7 @@
   register: kube_public_configmap_role_exists
   ignore_errors: True
 - name: create kube-public configmap role if required
-  when: kube_public_configmap_role_exists | failed
+  when: kube_public_configmap_role_exists is failed
   command: kubectl --kubeconfig /mnt/rootfs/etc/kubernetes/admin.conf -n kube-public create role system:bootstrap-signer-clusterinfo --verb get --resource configmaps
 - name: check if kube-public configmap rolebinding exists
@@ -189,7 +189,7 @@
   register: kube_public_configmap_rolebinding_exists
   ignore_errors: True
 - name: create kube-public configmap rolebinding if required
-  when: kube_public_configmap_rolebinding_exists | failed
+  when: kube_public_configmap_rolebinding_exists is failed
   command: kubectl --kubeconfig /mnt/rootfs/etc/kubernetes/admin.conf -n kube-public create rolebinding kubeadm:bootstrap-signer-clusterinfo --role system:bootstrap-signer-clusterinfo --user system:anonymous
 - name: adding labels to namespace to support network policy

@@ -16,7 +16,7 @@
   ignore_errors: True
 - name: DNS | Disable network NetworkManager management of resolv.conf
-  when: network_manager_in_use | succeeded
+  when: network_manager_in_use is succeeded
   ini_file:
     path: /etc/NetworkManager/NetworkManager.conf
     section: main
@@ -30,7 +30,7 @@
     dest: /etc/resolv.conf
 - name: DNS | Restarting NetworkManager
-  when: network_manager_in_use | succeeded
+  when: network_manager_in_use is succeeded
   block:
     - name: DNS | Restarting NetworkManager Service
       systemd: