
Verify YAML syntax in gates

This patchset adds a yamllint check covering all *.yml
files.

It also fixes existing syntax errors so that the jobs pass.

Change-Id: I3186adf9835b4d0cada272d156b17d1bc9c2b799
Eduardo Gonzalez, 1 year ago
commit ea1a1dee0d (tags/7.0.0.0b2)
50 changed files with 169 additions and 167 deletions
1. .yamllint (+15, -0)
2. .zuul.yaml (+2, -1)
3. ansible/group_vars/all.yml (+15, -15)
4. ansible/kolla-host.yml (+0, -1)
5. ansible/roles/baremetal/tasks/post-install.yml (+1, -1)
6. ansible/roles/bifrost/tasks/config.yml (+1, -1)
7. ansible/roles/ceilometer/tasks/register.yml (+2, -2)
8. ansible/roles/chrony/tasks/config.yml (+0, -1)
9. ansible/roles/common/defaults/main.yml (+1, -1)
10. ansible/roles/designate/tasks/config.yml (+1, -1)
11. ansible/roles/destroy/tasks/cleanup_host.yml (+1, -1)
12. ansible/roles/glance/handlers/main.yml (+0, -1)
13. ansible/roles/glance/tasks/config.yml (+0, -1)
14. ansible/roles/haproxy/tasks/config.yml (+0, -1)
15. ansible/roles/horizon/tasks/config.yml (+1, -2)
16. ansible/roles/mariadb/tasks/recover_cluster.yml (+61, -61)
17. ansible/roles/memcached/tasks/config.yml (+0, -1)
18. ansible/roles/neutron/tasks/config-neutron-fake.yml (+0, -1)
19. ansible/roles/neutron/tasks/config.yml (+0, -1)
20. ansible/roles/nova/defaults/main.yml (+1, -1)
21. ansible/roles/nova/tasks/bootstrap_xenapi.yml (+1, -0)
22. ansible/roles/nova/tasks/config.yml (+0, -1)
23. ansible/roles/openvswitch/tasks/config.yml (+0, -1)
24. ansible/roles/ovs-dpdk/defaults/main.yml (+1, -1)
25. ansible/roles/ovs-dpdk/handlers/main.yml (+2, -2)
26. ansible/roles/ovs-dpdk/tasks/config.yml (+1, -1)
27. ansible/roles/ovs-dpdk/tasks/pull.yml (+0, -1)
28. ansible/roles/prechecks/tasks/database_checks.yml (+5, -5)
29. ansible/roles/prechecks/tasks/user_checks.yml (+1, -3)
30. ansible/roles/rabbitmq/tasks/config.yml (+4, -4)
31. ansible/roles/skydive/defaults/main.yml (+0, -1)
32. ansible/roles/skydive/handlers/main.yml (+0, -1)
33. ansible/roles/skydive/tasks/precheck.yml (+0, -1)
34. ansible/roles/stop/tasks/stop_containers.yml (+0, -1)
35. ansible/roles/vitrage/defaults/main.yml (+2, -2)
36. contrib/demos/magnum/redis-kube/redis-controller.yaml (+14, -14)
37. contrib/demos/magnum/redis-kube/redis-proxy.yaml (+5, -5)
38. contrib/demos/magnum/redis-kube/redis-sentinel-controller.yaml (+8, -8)
39. etc/kolla/passwords.yml (+1, -1)
40. releasenotes/notes/add-congress-877644b4b0e2ed0a.yaml (+3, -3)
41. releasenotes/notes/add-designate-producer-b6e94153ff9bc45c.yaml (+1, -1)
42. releasenotes/notes/add-watcher-a97995ace827cf71.yaml (+5, -5)
43. releasenotes/notes/congress-broken-cbf8ca59d90a85cb.yaml (+1, -1)
44. releasenotes/notes/deprecate-some-config-files-0ed8c75f6bb4aa26.yaml (+0, -1)
45. releasenotes/notes/enable-nfs-volume-snapshots-a0347a31662b1109.yaml (+1, -1)
46. releasenotes/notes/opendaylight-role-b1787bc458da5bc4.yaml (+1, -1)
47. releasenotes/notes/remove-nova-network-76e1c9b51a4acd5f.yaml (+0, -1)
48. tests/pre.yml (+1, -0)
49. tools/playbook-setup-nodes.yml (+5, -5)
50. tox.ini (+4, -0)

.yamllint (+15, -0)

@@ -0,0 +1,15 @@
+extends: default
+ignore: |
+  .tox/
+
+rules:
+  line-length: disable
+  truthy: disable
+  braces:
+    max-spaces-inside: 1
+  comments:
+    # Ignore first space in comment because we set default options as:
+    #openstack_version: "pike"
+    require-starting-space: true
+    ignore: |
+      etc/kolla/globals.yml
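
In practice this keeps yamllint's default ruleset but relaxes line length and truthy values and allows a single space inside braces. A minimal sketch of YAML that passes under this configuration (key names and values here are illustrative, not taken from the repository):

    ---
    # comments need a space after '#' (require-starting-space),
    # except in etc/kolla/globals.yml, which is exempted above
    docker_options: { auth_email: "user@example.com" }  # one space inside braces is allowed
    enable_haproxy: yes  # bare yes/no pass because truthy is disabled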

.zuul.yaml (+2, -1)

@@ -1,3 +1,4 @@
+---
 - project:
    check:
      jobs:
@@ -72,7 +73,7 @@
    vars:
      scenario: aio
    roles:
-        - zuul: openstack-infra/zuul-jobs
+      - zuul: openstack-infra/zuul-jobs
 
 - job:
    name: kolla-ansible-centos-source
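
The new leading "---" satisfies the document-start rule that comes with "extends: default": yamllint expects each file to open with a document-start marker. A sketch of the accepted shape (the job name is illustrative):

    ---
    - project:
        check:
          jobs:
            - some-illustrative-job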

ansible/group_vars/all.yml (+15, -15)

@@ -62,7 +62,7 @@ container_proxy:
 
 # By default, Kolla API services bind to the network address assigned
 # to the api_interface.  Allow the bind address to be an override.
-api_interface_address:  "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
+api_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
 
 ################
 # Chrony options
@@ -98,14 +98,14 @@ docker_restart_policy_retry: "10"
 
 # Common options used throughout Docker
 docker_common_options:
-    auth_email: "{{ docker_registry_email }}"
-    auth_password: "{{ docker_registry_password }}"
-    auth_registry: "{{ docker_registry }}"
-    auth_username: "{{ docker_registry_username }}"
-    environment:
-      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
-    restart_policy: "{{ docker_restart_policy }}"
-    restart_retries: "{{ docker_restart_policy_retry }}"
+  auth_email: "{{ docker_registry_email }}"
+  auth_password: "{{ docker_registry_password }}"
+  auth_registry: "{{ docker_registry }}"
+  auth_username: "{{ docker_registry_username }}"
+  environment:
+    KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
+  restart_policy: "{{ docker_restart_policy }}"
+  restart_retries: "{{ docker_restart_policy_retry }}"
 
 
 ####################
@@ -336,7 +336,7 @@ supported_policy_format_list:
 
 # In the context of multi-regions, list here the name of all your regions.
 multiple_regions_names:
-    - "{{ openstack_region_name }}"
+  - "{{ openstack_region_name }}"
 
 openstack_service_workers: "{{ [ansible_processor_vcpus, 5]|min }}"
 openstack_service_rpc_workers: "{{ [ansible_processor_vcpus, 3]|min }}"
@@ -350,11 +350,11 @@ nova_console: "novnc"
 # OpenStack authentication string. You should only need to override these if you
 # are changing the admin tenant/project or user.
 openstack_auth:
-    auth_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}"
-    username: "admin"
-    password: "{{ keystone_admin_password }}"
-    project_name: "admin"
-    domain_name: "default"
+  auth_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}"
+  username: "admin"
+  password: "{{ keystone_admin_password }}"
+  project_name: "admin"
+  domain_name: "default"
 
 # Endpoint type used to connect with OpenStack services with ansible modules.
 # Valid options are [ public, internal, admin ]
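
Every hunk in this file is the same fix: nested mappings and sequences move from a four-space to a two-space step, so the indentation rule inherited from the default ruleset sees one consistent indent per level. As a sketch of the convention (keys taken from the hunks above):

    ---
    openstack_auth:
      username: "admin"
      project_name: "admin"
    multiple_regions_names:
      - "{{ openstack_region_name }}"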

ansible/kolla-host.yml (+0, -1)

@@ -37,4 +37,3 @@
   roles:
     - { role: baremetal,
         tags: baremetal }
-
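
Most of the +0/-1 hunks below repeat this fix: the removed line is a trailing blank line, which yamllint's default empty-lines rule (max-end: 0, per the defaults the new config inherits) rejects at end of file. A compliant task file then ends immediately after its last line, sketched here with placeholder content:

    ---
    - name: Example task
      command: /bin/true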

ansible/roles/baremetal/tasks/post-install.yml (+1, -1)

@@ -93,7 +93,7 @@
     - enable_host_ntp | bool
 
 - name: Synchronizing time one-time
-  command:  ntpd -gq
+  command: ntpd -gq
   become: True
   when: enable_host_ntp | bool
 

ansible/roles/bifrost/tasks/config.yml (+1, -1)

@@ -31,7 +31,7 @@
     mode: "0660"
   become: true
   with_items:
-     - "rabbitmq-env.conf"
+    - "rabbitmq-env.conf"
 
 - name: Template ssh keys
   template:

ansible/roles/ceilometer/tasks/register.yml (+2, -2)

@@ -11,7 +11,7 @@
       auth: "{{ '{{ openstack_ceilometer_auth }}' }}"
       endpoint_type: "{{ openstack_interface }}"
     module_extra_vars:
-     openstack_ceilometer_auth: "{{ openstack_ceilometer_auth }}"
+      openstack_ceilometer_auth: "{{ openstack_ceilometer_auth }}"
   run_once: True
 
 - name: Associate the ResellerAdmin role and ceilometer user
@@ -25,6 +25,6 @@
       auth: "{{ '{{ openstack_ceilometer_auth }}' }}"
       endpoint_type: "{{ openstack_interface }}"
     module_extra_vars:
-     openstack_ceilometer_auth: "{{ openstack_ceilometer_auth }}"
+      openstack_ceilometer_auth: "{{ openstack_ceilometer_auth }}"
   when: enable_swift | bool
   run_once: True

ansible/roles/chrony/tasks/config.yml (+0, -1)

@@ -49,4 +49,3 @@
   with_dict: "{{ chrony_services }}"
   notify:
     - Restart chrony container
-

ansible/roles/common/defaults/main.yml (+1, -1)

@@ -28,7 +28,7 @@ common_services:
       - "/dev/:/dev/"
       - "/run/:/run/:shared"
       - "kolla_logs:/var/log/kolla/"
-# DUMMY_ENVIRONMENT is needed because empty environment is not supported
+  # DUMMY_ENVIRONMENT is needed because empty environment is not supported
   cron:
     container_name: cron
     enabled: True

ansible/roles/designate/tasks/config.yml (+1, -1)

@@ -79,7 +79,7 @@
   vars:
     service: "{{ designate_services['designate-worker'] }}"
   template:
-    src:  "{{ item }}"
+    src: "{{ item }}"
    dest: "{{ node_config_directory }}/designate-worker/pools.yaml"
     mode: "0660"
   become: true

ansible/roles/destroy/tasks/cleanup_host.yml (+1, -1)

@@ -1,7 +1,7 @@
 ---
 - name: Destroying Kolla host configuration
   become: true
-  script:  ../tools/cleanup-host
+  script: ../tools/cleanup-host
   environment:
     enable_haproxy: "{{ enable_haproxy }}"
     enable_swift: "{{ enable_swift }}"

ansible/roles/glance/handlers/main.yml (+0, -1)

@@ -45,4 +45,3 @@
       or glance_conf.changed | bool
       or policy_overwriting.changed | bool
       or glance_registry_container.changed | bool
-

ansible/roles/glance/tasks/config.yml (+0, -1)

@@ -125,4 +125,3 @@
   notify:
     - Restart glance-api container
     - Restart glance-registry container
-

ansible/roles/haproxy/tasks/config.yml (+0, -1)

@@ -109,4 +109,3 @@
   with_dict: "{{ haproxy_services }}"
   notify:
     - "Restart {{ item.key }} container"
-

ansible/roles/horizon/tasks/config.yml (+1, -2)

@@ -13,7 +13,7 @@
   with_dict: "{{ horizon_services }}"
 
 - set_fact:
-   custom_policy: []
+    custom_policy: []
 
 - include: policy_item.yml
   vars:
@@ -131,4 +131,3 @@
     - horizon.enabled | bool
   notify:
     - Restart horizon container
-

ansible/roles/mariadb/tasks/recover_cluster.yml (+61, -61)

@@ -15,67 +15,67 @@
   run_once: true
 
 - block:
-  - name: Stop MariaDB containers
-    kolla_docker:
-      name: "{{ mariadb_service.container_name }}"
-      action: "stop_container"
-
-  - name: Run MariaDB wsrep recovery
-    kolla_docker:
-      action: "start_container"
-      common_options: "{{ docker_common_options }}"
-      environment:
-        KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
-        BOOTSTRAP_ARGS: "--wsrep-recover"
-      image: "{{ mariadb_service.image }}"
-      labels:
-        BOOTSTRAP:
-      name: "{{ mariadb_service.container_name }}"
-      restart_policy: "never"
-      volumes: "{{ mariadb_service.volumes }}"
-
-  - name: Stop MariaDB containers
-    kolla_docker:
-      name: "{{ mariadb_service.container_name }}"
-      action: "stop_container"
-
-  - name: Copying MariaDB log file to /tmp
-    shell: "docker cp {{ mariadb_service.container_name }}:/var/log/kolla/mariadb/mariadb.log /tmp/mariadb_tmp.log"
-
-  - name: Get MariaDB wsrep recovery seqno
-    shell: "tail -n 200 /tmp/mariadb_tmp.log | grep  Recovered | tail -1 | awk '{print $7}' | awk -F'\n' '{print $1}' | awk -F':' '{print $2}'"
-    register: wsrep_recovery_seqno
-
-  - name: Removing MariaDB log file from /tmp
-    file: path=/tmp/mariadb_tmp.log state=absent
-    changed_when: false
-    check_mode: no
-
-  - name: Registering MariaDB seqno variable
-    set_fact:
-      seqno: "{{ wsrep_recovery_seqno.stdout_lines[0] }}"
-    changed_when: false
-
-  - name: Comparing seqno value on all mariadb hosts
-    shell:
-      cmd: |
-        if [[ ! -z {{ hostvars[inventory_hostname]['seqno'] }} && ! -z {{ hostvars[item]['seqno'] }} &&
-        {{ hostvars[inventory_hostname]['seqno'] }} =~ ^[0-9]+$ && {{ hostvars[item]['seqno'] }} =~ ^[0-9]+$ &&
-        {{ hostvars[inventory_hostname]['seqno'] }} -lt {{ hostvars[item]['seqno'] }} ]]; then echo {{ hostvars[item]['seqno'] }}; fi
-    with_items: "{{ groups['mariadb'] }}"
-    register: seqno_compare
-    args:
-      executable: /bin/bash
-    changed_when: false
-
-  - name: Writing hostname of host with the largest seqno to temp file
-    local_action: copy content={{ inventory_hostname }} dest=/tmp/kolla_mariadb_recover_inventory_name mode=0644
-    changed_when: false
-    when: seqno_compare.results | map(attribute='stdout') | join('') == ""
-
-  - name: Registering mariadb_recover_inventory_name from temp file
-    set_fact:
-      mariadb_recover_inventory_name: "{{ lookup('file', '/tmp/kolla_mariadb_recover_inventory_name') }}"
+    - name: Stop MariaDB containers
+      kolla_docker:
+        name: "{{ mariadb_service.container_name }}"
+        action: "stop_container"
+
+    - name: Run MariaDB wsrep recovery
+      kolla_docker:
+        action: "start_container"
+        common_options: "{{ docker_common_options }}"
+        environment:
+          KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
+          BOOTSTRAP_ARGS: "--wsrep-recover"
+        image: "{{ mariadb_service.image }}"
+        labels:
+          BOOTSTRAP:
+        name: "{{ mariadb_service.container_name }}"
+        restart_policy: "never"
+        volumes: "{{ mariadb_service.volumes }}"
+
+    - name: Stop MariaDB containers
+      kolla_docker:
+        name: "{{ mariadb_service.container_name }}"
+        action: "stop_container"
+
+    - name: Copying MariaDB log file to /tmp
+      shell: "docker cp {{ mariadb_service.container_name }}:/var/log/kolla/mariadb/mariadb.log /tmp/mariadb_tmp.log"
+
+    - name: Get MariaDB wsrep recovery seqno
+      shell: "tail -n 200 /tmp/mariadb_tmp.log | grep  Recovered | tail -1 | awk '{print $7}' | awk -F'\n' '{print $1}' | awk -F':' '{print $2}'"
+      register: wsrep_recovery_seqno
+
+    - name: Removing MariaDB log file from /tmp
+      file: path=/tmp/mariadb_tmp.log state=absent
+      changed_when: false
+      check_mode: no
+
+    - name: Registering MariaDB seqno variable
+      set_fact:
+        seqno: "{{ wsrep_recovery_seqno.stdout_lines[0] }}"
+      changed_when: false
+
+    - name: Comparing seqno value on all mariadb hosts
+      shell:
+        cmd: |
+          if [[ ! -z {{ hostvars[inventory_hostname]['seqno'] }} && ! -z {{ hostvars[item]['seqno'] }} &&
+          {{ hostvars[inventory_hostname]['seqno'] }} =~ ^[0-9]+$ && {{ hostvars[item]['seqno'] }} =~ ^[0-9]+$ &&
+          {{ hostvars[inventory_hostname]['seqno'] }} -lt {{ hostvars[item]['seqno'] }} ]]; then echo {{ hostvars[item]['seqno'] }}; fi
+      with_items: "{{ groups['mariadb'] }}"
+      register: seqno_compare
+      args:
+        executable: /bin/bash
+      changed_when: false
+
+    - name: Writing hostname of host with the largest seqno to temp file
+      local_action: copy content={{ inventory_hostname }} dest=/tmp/kolla_mariadb_recover_inventory_name mode=0644
+      changed_when: false
+      when: seqno_compare.results | map(attribute='stdout') | join('') == ""
+
+    - name: Registering mariadb_recover_inventory_name from temp file
+      set_fact:
+        mariadb_recover_inventory_name: "{{ lookup('file', '/tmp/kolla_mariadb_recover_inventory_name') }}"
   when:
     - mariadb_recover_inventory_name is not defined
 
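
The change here is purely indentation: every task moves one step right so it nests inside the block, the layout yamllint accepts for a list under block: relative to its parent. A minimal sketch of the resulting shape, with placeholder tasks:

    ---
    - block:
        - name: First task in the block
          command: /bin/true
        - name: Second task in the block
          command: /bin/true
      when:
        - some_condition | bool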

ansible/roles/memcached/tasks/config.yml (+0, -1)

@@ -37,4 +37,3 @@
     - service.enabled | bool
     - action != "config"
   notify: Restart memcached container
-

ansible/roles/neutron/tasks/config-neutron-fake.yml (+0, -1)

@@ -86,4 +86,3 @@
     group: "{{ config_owner_group }}"
   when: inventory_hostname in groups['compute']
   with_sequence: start=1 end={{ num_nova_fake_per_node }}
-

ansible/roles/neutron/tasks/config.yml (+0, -1)

@@ -436,4 +436,3 @@
   with_dict: "{{ neutron_services }}"
   notify:
     - "Restart {{ item.key }} container"
-

ansible/roles/nova/defaults/main.yml (+1, -1)

@@ -111,7 +111,7 @@ nova_services:
     group: "compute"
     image: "{{ nova_compute_image_full }}"
     environment:
-        LIBGUESTFS_BACKEND: "direct"
+      LIBGUESTFS_BACKEND: "direct"
     privileged: True
     enabled: "{{ not enable_nova_fake | bool }}"
     ipc_mode: "host"

ansible/roles/nova/tasks/bootstrap_xenapi.yml (+1, -0)

@@ -1,3 +1,4 @@
+---
 - name: Install package python-os-xenapi
   package:
     name: python-os-xenapi

ansible/roles/nova/tasks/config.yml (+0, -1)

@@ -203,4 +203,3 @@
   with_dict: "{{ nova_services }}"
   notify:
     - "Restart {{ item.key }} container"
-

ansible/roles/openvswitch/tasks/config.yml (+0, -1)

@@ -70,4 +70,3 @@
   with_dict: "{{ openvswitch_services }}"
   notify:
     - "Restart {{ item.key }} container"
-

ansible/roles/ovs-dpdk/defaults/main.yml (+1, -1)

@@ -50,7 +50,7 @@ ovsdpdk_services:
 ####################
 ovs_bridge_mappings: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
 ovs_port_mappings: "{% for bridge in neutron_bridge_name.split(',') %} {{ neutron_external_interface.split(',')[loop.index0] }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
-dpdk_tunnel_interface : "{{neutron_external_interface}}"
+dpdk_tunnel_interface: "{{neutron_external_interface}}"
 dpdk_tunnel_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + dpdk_tunnel_interface]['ipv4']['address'] }}"
 tunnel_interface_network: "{{ hostvars[inventory_hostname]['ansible_' + dpdk_tunnel_interface]['ipv4']['network']}}/{{hostvars[inventory_hostname]['ansible_' + dpdk_tunnel_interface]['ipv4']['netmask']}}"
 tunnel_interface_cidr: "{{dpdk_tunnel_interface_address}}/{{ tunnel_interface_network | ipaddr('prefix') }}"

ansible/roles/ovs-dpdk/handlers/main.yml (+2, -2)

@@ -50,7 +50,7 @@
        or inventory_hostname in groups['neutron-l3-agent']
        or inventory_hostname in groups['neutron-metadata-agent']
        or inventory_hostname in groups['neutron-vpnaas-agent'])
-    -  ovs_physical_port_policy == 'indexed'
+    - ovs_physical_port_policy == 'indexed'
 
 - name: Restart ovsdpdk-vswitchd container
   vars:
@@ -86,7 +86,7 @@
        or inventory_hostname in groups['neutron-l3-agent']
        or inventory_hostname in groups['neutron-metadata-agent']
        or inventory_hostname in groups['neutron-vpnaas-agent'])
-    -  ovs_physical_port_policy == 'named'
+    - ovs_physical_port_policy == 'named'
 
 - name: wait for dpdk tunnel ip
   wait_for:

ansible/roles/ovs-dpdk/tasks/config.yml (+1, -1)

@@ -19,7 +19,7 @@
     - item.value.host_in_groups | bool
   with_dict: "{{ ovsdpdk_services }}"
   notify:
-  - "Restart {{ item.key }} container"
+    - "Restart {{ item.key }} container"
 
 - name: Copying ovs-dpdkctl tool
   copy:

ansible/roles/ovs-dpdk/tasks/pull.yml (+0, -1)

@@ -8,4 +8,3 @@
     - item.value.enabled | bool
     - item.value.host_in_groups | bool
   with_dict: "{{ ovsdpdk_services }}"
-

ansible/roles/prechecks/tasks/database_checks.yml (+5, -5)

@@ -6,9 +6,9 @@
     port: "{{ database_port }}"
   with_items: "{{ groups['mariadb'] }}"
   when:
-      - not enable_mariadb | bool
-      - enable_external_mariadb_load_balancer | bool
-      - inventory_hostname in groups['haproxy']
+    - not enable_mariadb | bool
+    - enable_external_mariadb_load_balancer | bool
+    - inventory_hostname in groups['haproxy']
 
 - name: "Check if external database address is reachable from all hosts"
   wait_for:
@@ -16,5 +16,5 @@
     host: "{{ database_address }}"
     port: "{{ database_port }}"
   when:
-      - not enable_mariadb | bool
-      - not enable_external_mariadb_load_balancer | bool
+    - not enable_mariadb | bool
+    - not enable_external_mariadb_load_balancer | bool

ansible/roles/prechecks/tasks/user_checks.yml (+1, -3)

@@ -10,10 +10,8 @@
     key: "{{ config_owner_group }}"
   register: getent_group
 
-#(duonghq) it's only a basic check, should be refined later
+# NOTE(duonghq): it's only a basic check, should be refined later
 - name: Check if ansible user can do passwordless sudo
   shell: sudo -n true
   register: result
   failed_when: result | failed
-
-

ansible/roles/rabbitmq/tasks/config.yml (+4, -4)

@@ -39,10 +39,10 @@
     - inventory_hostname in groups[service.group]
     - service.enabled | bool
   with_items:
-     - "rabbitmq-env.conf"
-     - "rabbitmq.config"
-     - "rabbitmq-clusterer.config"
-     - "definitions.json"
+    - "rabbitmq-env.conf"
+    - "rabbitmq.config"
+    - "rabbitmq-clusterer.config"
+    - "definitions.json"
   notify:
     - Restart rabbitmq container
 

ansible/roles/skydive/defaults/main.yml (+0, -1)

@@ -34,4 +34,3 @@ skydive_analyzer_image_full: "{{ skydive_analyzer_image }}:{{ skydive_analyzer_t
 skydive_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-skydive-agent"
 skydive_agent_tag: "{{ openstack_release }}"
 skydive_agent_image_full: "{{ skydive_agent_image }}:{{ skydive_agent_tag }}"
-

ansible/roles/skydive/handlers/main.yml (+0, -1)

@@ -41,4 +41,3 @@
     - config_json.changed | bool
       or skydive_conf.changed |bool
       or skydive_agent_container.changed | bool
-

ansible/roles/skydive/tasks/precheck.yml (+0, -1)

@@ -31,4 +31,3 @@
     - container_facts['skydive_agent'] is not defined
     - inventory_hostname in groups[skydive_agent.group]
     - skydive_agent.enabled | bool
-

ansible/roles/stop/tasks/stop_containers.yml (+0, -1)

@@ -1,4 +1,3 @@
 ---
 - name: Stopping Kolla containers
   command: /tmp/kolla-stop/tools/stop-containers
-

ansible/roles/vitrage/defaults/main.yml (+2, -2)

@@ -121,8 +121,8 @@ vitrage_datasource:
     enabled: "{{ enable_cinder | bool }}"
   - name: "neutron.network,neutron.port"
     enabled: "{{ enable_neutron | bool }}"
-#TODO(egonzalez) Heat cannot be used with default policy.json due stacks:global_index=rule:deny_everybody.
-# Document process to deploy vitrage+heat.
+  # TODO(egonzalez) Heat cannot be used with default policy.json due stacks:global_index=rule:deny_everybody.
+  # Document process to deploy vitrage+heat.
   - name: "heat.stack"
     enabled: "no"
 

contrib/demos/magnum/redis-kube/redis-controller.yaml (+14, -14)

@@ -4,7 +4,7 @@ kind: ReplicationController
 metadata:
   name: redis
 spec:
-  replicas:  2
+  replicas: 2
   selector:
     name: redis
   template:
@@ -13,16 +13,16 @@ spec:
         name: redis
     spec:
       containers:
-      - name: redis
-        image: kubernetes/redis:v1
-        ports:
-        - containerPort: 6379
-        resources:
-          limits:
-            cpu: "1"
-        volumeMounts:
-        - mountPath: /redis-master-data
-          name: data
-      volumes:
-        - name: data
-          emptyDir: {}
+        - name: redis
+          image: kubernetes/redis:v1
+          ports:
+            - containerPort: 6379
+          resources:
+            limits:
+              cpu: "1"
+          volumeMounts:
+            - mountPath: /redis-master-data
+              name: data
+          volumes:
+            - name: data
+              emptyDir: {}
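
The Kubernetes demo manifests get the same consistency fix: list items are indented one step under their parent key. Sketched on a pared-down pod spec using values from the hunk above:

    spec:
      containers:
        - name: redis
          image: kubernetes/redis:v1
          ports:
            - containerPort: 6379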

contrib/demos/magnum/redis-kube/redis-proxy.yaml (+5, -5)

@@ -8,8 +8,8 @@ metadata:
   name: redis-proxy
 spec:
   containers:
-  - name: proxy
-    image: kubernetes/redis-proxy:v1
-    ports:
-    - containerPort: 6379
-      name: api
+    - name: proxy
+      image: kubernetes/redis-proxy:v1
+      ports:
+        - containerPort: 6379
+          name: api

contrib/demos/magnum/redis-kube/redis-sentinel-controller.yaml (+8, -8)

@@ -4,7 +4,7 @@ kind: ReplicationController
 metadata:
   name: redis-sentinel
 spec:
-  replicas:  2
+  replicas: 2
   selector:
     redis-sentinel: "true"
   template:
@@ -15,10 +15,10 @@ spec:
         role: sentinel
     spec:
       containers:
-      - name: sentinel
-        image: kubernetes/redis:v1
-        env:
-          - name: SENTINEL
-            value: "true"
-        ports:
-          - containerPort: 26379
+        - name: sentinel
+          image: kubernetes/redis:v1
+          env:
+            - name: SENTINEL
+              value: "true"
+          ports:
+            - containerPort: 26379

etc/kolla/passwords.yml (+1, -1)

@@ -179,7 +179,7 @@ vitrage_keystone_password:
 
 memcache_secret_key:
 
-#HMAC secret key
+# HMAC secret key
 osprofiler_secret:
 
 nova_ssh_key:

releasenotes/notes/add-congress-877644b4b0e2ed0a.yaml (+3, -3)

@@ -1,5 +1,5 @@
 ---
 features:
-- Add ansible role for openstack congress project which provide
-  policy as a service across any collection of cloud services in
-  order to offer governance and compliance for dynamic infrastructures.
+  - Add ansible role for openstack congress project which provide
+    policy as a service across any collection of cloud services in
+    order to offer governance and compliance for dynamic infrastructures.

releasenotes/notes/add-designate-producer-b6e94153ff9bc45c.yaml (+1, -1)

@@ -1,3 +1,3 @@
 ---
 features:
-  -  Add designate-producer ansible role. Orchestrates periodic tasks that are run by designate.
+  - Add designate-producer ansible role. Orchestrates periodic tasks that are run by designate.

releasenotes/notes/add-watcher-a97995ace827cf71.yaml (+5, -5)

@@ -1,6 +1,6 @@
 ---
- features:
-   - Introduce OpenStack Infrastructure Optimization
-     service, also known as Watcher.  This project makes
-     use of Ceilometer data to rebalance the cloud to
-     meet declared goals and strategies.
+features:
+  - Introduce OpenStack Infrastructure Optimization
+    service, also known as Watcher.  This project makes
+    use of Ceilometer data to rebalance the cloud to
+    meet declared goals and strategies.

releasenotes/notes/congress-broken-cbf8ca59d90a85cb.yaml (+1, -1)

@@ -1,4 +1,4 @@
 ---
 other:
   - Congress doesn't work correctly out of the box and will
-    not deploy.  See Bug #1634641.
+    not deploy. See Bug https://bugs.launchpad.net/kolla-ansible/+bug/1634641.

releasenotes/notes/deprecate-some-config-files-0ed8c75f6bb4aa26.yaml (+0, -1)

@@ -6,4 +6,3 @@ deprecations:
 
     * /etc/kolla/config/database.conf
     * /etc/kolla/config/messaging.conf
-

releasenotes/notes/enable-nfs-volume-snapshots-a0347a31662b1109.yaml (+1, -1)

@@ -8,4 +8,4 @@ upgrade:
   - |
     On upgrade NFS Cinder snapshots will be activated. One can
     prohibit this by setting nfs_snapshot_support = False in
-    /etc/kolla/config/cinder/cinder-volume.conf, section '[nfs-1]'.
+    /etc/kolla/config/cinder/cinder-volume.conf, section '[nfs-1]'.

releasenotes/notes/opendaylight-role-b1787bc458da5bc4.yaml (+1, -1)

@@ -1,3 +1,3 @@
 ---
 features:
-  - Add OpenDaylight role
+  - Add OpenDaylight role

releasenotes/notes/remove-nova-network-76e1c9b51a4acd5f.yaml (+0, -1)

@@ -2,4 +2,3 @@
 deprecations:
   - The nova-network was deprecated, we remove it from the nova ansible
     role.
-

tests/pre.yml (+1, -0)

@@ -1,3 +1,4 @@
+---
 - hosts: all
   vars:
     logs_dir: "/tmp/logs"

tools/playbook-setup-nodes.yml (+5, -5)

@@ -32,8 +32,8 @@
   become: true
 
   tasks:
-  - name: Create log directory for node
-    file:
-      state: directory
-      path: /tmp/{{ inventory_hostname }}
-    become: false
+    - name: Create log directory for node
+      file:
+        state: directory
+        path: /tmp/{{ inventory_hostname }}
+      become: false

tox.ini (+4, -0)

@@ -30,11 +30,15 @@ setenv = VIRTUAL_ENV={envdir}
 commands = python setup.py testr --coverage --testr-args='{posargs}'
 
 [testenv:pep8]
+deps =
+    {[testenv]deps}
+    yamllint
 commands =
   {toxinidir}/tools/run-bashate.sh
   flake8 {posargs}
   python {toxinidir}/tools/validate-all-file.py
   bandit -r ansible kolla_ansible tests tools
+  yamllint .
 
 [testenv:bandit]
 commands = bandit -r ansible kolla_ansible tests tools
