From 5750d2a01f37de9b8381168341669d14939e1284 Mon Sep 17 00:00:00 2001
From: Pete Birley
Date: Wed, 18 Apr 2018 23:32:35 -0500
Subject: [PATCH] Gate: update Ansible conventions

Using tests as filters is deprecated, and will be removed in 2.9.

Change-Id: I2bc31177cdb3d59319c4cb04f77db573f3217479
---
 roles/build-helm-packages/tasks/setup-helm-serve.yaml | 10 +++++-----
 roles/deploy-docker/tasks/main.yaml                   | 10 +++++-----
 .../tasks/util-common-helm-chart.yaml                 | 10 +++++-----
 .../tasks/util-common-helm-test.yaml                  |  4 ++--
 .../roles/deploy-kubeadm-master/tasks/main.yaml       |  6 +++---
 .../roles/deploy-kubelet/tasks/setup-dns.yaml         |  4 ++--
 6 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/roles/build-helm-packages/tasks/setup-helm-serve.yaml b/roles/build-helm-packages/tasks/setup-helm-serve.yaml
index 948b6f3ad..6057484d9 100644
--- a/roles/build-helm-packages/tasks/setup-helm-serve.yaml
+++ b/roles/build-helm-packages/tasks/setup-helm-serve.yaml
@@ -20,7 +20,7 @@
   register: need_helm
   ignore_errors: True
 - name: install helm client
-  when: need_helm | failed
+  when: need_helm is failed
   become_user: root
   shell: |
     TMP_DIR=$(mktemp -d)
@@ -42,13 +42,13 @@
   register: helm_server_running
   ignore_errors: True
 - name: getting current host user name
-  when: helm_server_running | failed
+  when: helm_server_running is failed
   shell: id -un
   args:
     executable: /bin/bash
   register: helm_server_user
 - name: moving systemd unit into place for helm server
-  when: helm_server_running | failed
+  when: helm_server_running is failed
   become: yes
   become_user: root
   template:
@@ -56,7 +56,7 @@
     dest: /etc/systemd/system/helm-serve.service
     mode: 0640
 - name: starting helm serve service
-  when: helm_server_running | failed
+  when: helm_server_running is failed
   become: yes
   become_user: root
   systemd:
@@ -80,7 +80,7 @@
   register: helm_stable_repo_present
   ignore_errors: True
 - name: checking if helm 'stable' repo is present
-  when: helm_stable_repo_present | succeeded
+  when: helm_stable_repo_present is succeeded
   command: helm repo remove stable

 - name: adding helm local repo
diff --git a/roles/deploy-docker/tasks/main.yaml b/roles/deploy-docker/tasks/main.yaml
index 2923a98bb..eedeafd9e 100644
--- a/roles/deploy-docker/tasks/main.yaml
+++ b/roles/deploy-docker/tasks/main.yaml
@@ -18,21 +18,21 @@
   ignore_errors: True

 - name: centos | moving systemd unit into place
-  when: ( ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' ) and ( need_docker | failed )
+  when: ( ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' ) and ( need_docker is failed )
   template:
     src: centos-docker.service.j2
     dest: /etc/systemd/system/docker.service
     mode: 0640

 - name: fedora | moving systemd unit into place
-  when: ( ansible_distribution == 'Fedora' ) and ( need_docker | failed )
+  when: ( ansible_distribution == 'Fedora' ) and ( need_docker is failed )
   template:
     src: fedora-docker.service.j2
     dest: /etc/systemd/system/docker.service
     mode: 0640

 - name: ubuntu | moving systemd unit into place
-  when: ( ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' ) and ( need_docker | failed )
+  when: ( ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' ) and ( need_docker is failed )
   template:
     src: ubuntu-docker.service.j2
     dest: /etc/systemd/system/docker.service
@@ -46,14 +46,14 @@
     state: directory

 - name: proxy | moving proxy systemd unit into place
-  when: ( need_docker | failed ) and ( proxy.http is defined and (proxy.http | trim != "") )
+  when: ( need_docker is failed ) and ( proxy.http is defined and (proxy.http | trim != "") )
   template:
     src: http-proxy.conf.j2
     dest: /etc/systemd/system/docker.service.d/http-proxy.conf
     mode: 0640

 - name: deploy docker packages
-  when: need_docker | failed
+  when: need_docker is failed
   include_role:
     name: deploy-package
     tasks_from: dist
diff --git a/roles/deploy-helm-packages/tasks/util-common-helm-chart.yaml b/roles/deploy-helm-packages/tasks/util-common-helm-chart.yaml
index b95c7f1f5..3ff590d49 100644
--- a/roles/deploy-helm-packages/tasks/util-common-helm-chart.yaml
+++ b/roles/deploy-helm-packages/tasks/util-common-helm-chart.yaml
@@ -40,17 +40,17 @@
   register: local_overrides

 - name: "try to deploy release {{ chart_def['release'] }} in {{ chart_def['namespace'] }} namespace with {{ chart_def['chart_name'] }} chart"
-  when: check_deployed_result | failed
+  when: check_deployed_result is failed
   command: "helm install {{ work_dir }}/{{ chart_def['chart_name'] }} --namespace {{ chart_def['namespace'] }} --name {{ chart_def['release'] }} --values={{ chart_values_file.path }}{% if local_overrides.stat.exists %} --values {{ work_dir }}/tools/gate/local-overrides/{{ chart_def['release'] }}.yaml{% endif %}"
   register: out

 - name: "display info for the helm {{ chart_def['release'] }} release deploy"
-  when: check_deployed_result | failed
+  when: check_deployed_result is failed
   debug:
     var: out.stdout_lines
 - name: "pre-upgrade, delete jobs for {{ chart_def['release'] }} release"
   when:
-    - check_deployed_result | succeeded
+    - check_deployed_result is succeeded
     - "'upgrade' in chart_def"
     - "'pre' in chart_def['upgrade']"
     - "'delete' in chart_def['upgrade']['pre']"
@@ -60,11 +60,11 @@
     loop_var: helm_upgrade_delete_job
   command: "kubectl delete --namespace {{ chart_def['namespace'] }} job -l application={{ helm_upgrade_delete_job.labels.application }},component={{ helm_upgrade_delete_job.labels.component }} --ignore-not-found=true"
 - name: "try to upgrade release {{ chart_def['release'] }} in {{ chart_def['namespace'] }} namespace with {{ chart_def['chart_name'] }} chart"
-  when: check_deployed_result | succeeded
+  when: check_deployed_result is succeeded
   command: "helm upgrade {{ chart_def['release'] }} {{ work_dir }}/{{ chart_def['chart_name'] }} --values={{ chart_values_file.path }}{% if local_overrides.stat.exists %} --values {{ work_dir }}/tools/gate/local-overrides/{{ chart_def['release'] }}.yaml{% endif %}"
   register: out
 - name: "display info for the helm {{ chart_def['release'] }} release upgrade"
-  when: check_deployed_result | succeeded
+  when: check_deployed_result is succeeded
   debug:
     var: out.stdout_lines
diff --git a/roles/deploy-helm-packages/tasks/util-common-helm-test.yaml b/roles/deploy-helm-packages/tasks/util-common-helm-test.yaml
index a926946b1..e5c078599 100644
--- a/roles/deploy-helm-packages/tasks/util-common-helm-test.yaml
+++ b/roles/deploy-helm-packages/tasks/util-common-helm-test.yaml
@@ -41,7 +41,7 @@

 - name: "gathering logs for helm tests for {{ release }}"
   when:
-    - test_result | succeeded
+    - test_result is succeeded
   shell: |-
     set -e
     kubectl logs {{ release }}-test -n {{ namespace }} >> {{ logs_dir }}/helm-tests/{{ release }}.txt
@@ -51,7 +51,7 @@

 - name: "displaying logs for successful helm tests for {{ release }}"
   when:
-    - test_result | succeeded
+    - test_result is succeeded
     - "'output' in test_settings"
     - "test_settings.output|bool == true"
   debug:
diff --git a/tools/images/kubeadm-aio/assets/opt/playbooks/roles/deploy-kubeadm-master/tasks/main.yaml b/tools/images/kubeadm-aio/assets/opt/playbooks/roles/deploy-kubeadm-master/tasks/main.yaml
index bd7b16797..16529a307 100644
--- a/tools/images/kubeadm-aio/assets/opt/playbooks/roles/deploy-kubeadm-master/tasks/main.yaml
+++ b/tools/images/kubeadm-aio/assets/opt/playbooks/roles/deploy-kubeadm-master/tasks/main.yaml
@@ -156,7 +156,7 @@
   register: kube_public_ns_exists
   ignore_errors: True
 - name: create kube-public namespace if required
-  when: kube_public_ns_exists | failed
+  when: kube_public_ns_exists is failed
   command: kubectl --kubeconfig /mnt/rootfs/etc/kubernetes/admin.conf create ns kube-public
 - name: sourcing kube cluster admin credentials
   include_vars: /etc/kubernetes/admin.conf
@@ -181,7 +181,7 @@
   register: kube_public_configmap_role_exists
   ignore_errors: True
 - name: create kube-public configmap role if required
-  when: kube_public_configmap_role_exists | failed
+  when: kube_public_configmap_role_exists is failed
   command: kubectl --kubeconfig /mnt/rootfs/etc/kubernetes/admin.conf -n kube-public create role system:bootstrap-signer-clusterinfo --verb get --resource configmaps

 - name: check if kube-public configmap rolebinding exists
@@ -189,7 +189,7 @@
   register: kube_public_configmap_rolebinding_exists
   ignore_errors: True
 - name: create kube-public configmap rolebinding if required
-  when: kube_public_configmap_rolebinding_exists | failed
+  when: kube_public_configmap_rolebinding_exists is failed
   command: kubectl --kubeconfig /mnt/rootfs/etc/kubernetes/admin.conf -n kube-public create rolebinding kubeadm:bootstrap-signer-clusterinfo --role system:bootstrap-signer-clusterinfo --user system:anonymous

 - name: adding labels to namespace to support network policy
diff --git a/tools/images/kubeadm-aio/assets/opt/playbooks/roles/deploy-kubelet/tasks/setup-dns.yaml b/tools/images/kubeadm-aio/assets/opt/playbooks/roles/deploy-kubelet/tasks/setup-dns.yaml
index b6d708606..cc31168b7 100644
--- a/tools/images/kubeadm-aio/assets/opt/playbooks/roles/deploy-kubelet/tasks/setup-dns.yaml
+++ b/tools/images/kubeadm-aio/assets/opt/playbooks/roles/deploy-kubelet/tasks/setup-dns.yaml
@@ -16,7 +16,7 @@
   ignore_errors: True

 - name: DNS | Disable network NetworkManager management of resolv.conf
-  when: network_manager_in_use | succeeded
+  when: network_manager_in_use is succeeded
   ini_file:
     path: /etc/NetworkManager/NetworkManager.conf
     section: main
@@ -30,7 +30,7 @@
     dest: /etc/resolv.conf

 - name: DNS | Restarting NetworkManager
-  when: network_manager_in_use | succeeded
+  when: network_manager_in_use is succeeded
   block:
     - name: DNS | Restarting NetworkManager Service
       systemd:
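
Reviewer note (not part of the patch itself): the diff replaces the deprecated
Jinja2 filter forms ("result | failed", "result | succeeded") with the test
forms ("result is failed", "result is succeeded") that Ansible requires from
2.9 onward. The sketch below is a minimal, self-contained illustration of the
before/after pattern; the playbook, task names, and variables are hypothetical
and do not correspond to files in this repository.

    # illustrative-only.yaml -- hypothetical example of the syntax migration
    - hosts: localhost
      gather_facts: false
      tasks:
        - name: probe for a binary, tolerating failure
          command: which docker
          register: probe_result
          ignore_errors: true

        - name: act only when the probe failed
          # deprecated filter form (removed in Ansible 2.9):
          #   when: probe_result | failed
          # test form used throughout this patch:
          when: probe_result is failed
          debug:
            msg: "binary not found; deployment tasks would run here"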