Verify YAML syntax in gates

This patchset adds a yamllint check that covers all *.yml
files.

It also fixes existing syntax errors so that the jobs pass.

Change-Id: I3186adf9835b4d0cada272d156b17d1bc9c2b799
Author: Eduardo Gonzalez
Date: 2018-02-25 12:02:00 +01:00
Parent: 7851de5c3c
Commit: ea1a1dee0d
50 changed files with 160 additions and 158 deletions
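
The check is wired into the pep8 tox environment (see the tox.ini hunk at the end of this patch, which adds yamllint to the env's deps and runs `yamllint .`). Most of the per-file fixes below follow the same few patterns; as an illustrative sketch only (the keys are placeholders, not taken from the repository), a file that previously failed the lint ends up looking like this:

```yaml
---
# Corrections applied repeatedly in this patchset (names below are made up):
#   * every document now starts with the "---" marker above
#   * comments gain a space after "#"        ("#foo"        -> "# foo")
#   * no space is left before a colon        ("key : value" -> "key: value")
#   * trailing blank lines and inconsistent indentation are cleaned up
example_key: "{{ example_value }}"
```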

.yamllint (new file, 15 lines)

@@ -0,0 +1,15 @@
extends: default
ignore: |
  .tox/
rules:
  line-length: disable
  truthy: disable
  braces:
    max-spaces-inside: 1
  comments:
    # Ignore first space in comment because we set default options as:
    #openstack_version: "pike"
    require-starting-space: true
    ignore: |
      etc/kolla/globals.yml
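
Two of these overrides do the real work: `line-length` and `truthy` are simply disabled, `braces` allows a single space inside YAML flow mappings, and the `comments` rule requires a space after `#` everywhere except etc/kolla/globals.yml, which intentionally ships commented-out defaults such as `#openstack_version: "pike"`. A minimal sketch of what those two rules accept, using made-up keys:

```yaml
---
# comments rule: a space must follow the "#" marker
example_enabled: true

# braces rule (max-spaces-inside: 1): one space inside a flow mapping is fine;
# "{  role: example  }" with two spaces inside the braces would be flagged.
roles:
  - { role: example_role, tags: example_tag }
```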

(next file)
@@ -1,3 +1,4 @@
+---
- project:
check:
jobs:
@@ -72,7 +73,7 @@
vars:
scenario: aio
roles:
- zuul: openstack-infra/zuul-jobs
- job:
name: kolla-ansible-centos-source

(next file)
@@ -62,7 +62,7 @@ container_proxy:
# By default, Kolla API services bind to the network address assigned
# to the api_interface. Allow the bind address to be an override.
api_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
################
# Chrony options
@@ -98,14 +98,14 @@ docker_restart_policy_retry: "10"
# Common options used throughout Docker
docker_common_options:
auth_email: "{{ docker_registry_email }}"
auth_password: "{{ docker_registry_password }}"
auth_registry: "{{ docker_registry }}"
auth_username: "{{ docker_registry_username }}"
environment:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
restart_policy: "{{ docker_restart_policy }}"
restart_retries: "{{ docker_restart_policy_retry }}"
####################
@@ -336,7 +336,7 @@ supported_policy_format_list:
# In the context of multi-regions, list here the name of all your regions.
multiple_regions_names:
- "{{ openstack_region_name }}"
openstack_service_workers: "{{ [ansible_processor_vcpus, 5]|min }}"
openstack_service_rpc_workers: "{{ [ansible_processor_vcpus, 3]|min }}"
@@ -350,11 +350,11 @@ nova_console: "novnc"
# OpenStack authentication string. You should only need to override these if you
# are changing the admin tenant/project or user.
openstack_auth:
auth_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}"
username: "admin"
password: "{{ keystone_admin_password }}"
project_name: "admin"
domain_name: "default"
# Endpoint type used to connect with OpenStack services with ansible modules.
# Valid options are [ public, internal, admin ]

(next file)
@@ -37,4 +37,3 @@
roles:
- { role: baremetal,
tags: baremetal }

(next file)
@@ -93,7 +93,7 @@
- enable_host_ntp | bool
- name: Synchronizing time one-time
command: ntpd -gq
become: True
when: enable_host_ntp | bool

(next file)
@@ -31,7 +31,7 @@
mode: "0660"
become: true
with_items:
- "rabbitmq-env.conf"
- name: Template ssh keys
template:

(next file)
@@ -11,7 +11,7 @@
auth: "{{ '{{ openstack_ceilometer_auth }}' }}"
endpoint_type: "{{ openstack_interface }}"
module_extra_vars:
openstack_ceilometer_auth: "{{ openstack_ceilometer_auth }}"
run_once: True
- name: Associate the ResellerAdmin role and ceilometer user
@@ -25,6 +25,6 @@
auth: "{{ '{{ openstack_ceilometer_auth }}' }}"
endpoint_type: "{{ openstack_interface }}"
module_extra_vars:
openstack_ceilometer_auth: "{{ openstack_ceilometer_auth }}"
when: enable_swift | bool
run_once: True

(next file)
@@ -49,4 +49,3 @@
with_dict: "{{ chrony_services }}"
notify:
- Restart chrony container

(next file)
@@ -28,7 +28,7 @@ common_services:
- "/dev/:/dev/"
- "/run/:/run/:shared"
- "kolla_logs:/var/log/kolla/"
# DUMMY_ENVIRONMENT is needed because empty environment is not supported
cron:
container_name: cron
enabled: True

(next file)
@@ -79,7 +79,7 @@
vars:
service: "{{ designate_services['designate-worker'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/designate-worker/pools.yaml"
mode: "0660"
become: true

(next file)
@@ -1,7 +1,7 @@
---
- name: Destroying Kolla host configuration
become: true
script: ../tools/cleanup-host
environment:
enable_haproxy: "{{ enable_haproxy }}"
enable_swift: "{{ enable_swift }}"

(next file)
@@ -45,4 +45,3 @@
or glance_conf.changed | bool
or policy_overwriting.changed | bool
or glance_registry_container.changed | bool

(next file)
@@ -125,4 +125,3 @@
notify:
- Restart glance-api container
- Restart glance-registry container

(next file)
@@ -109,4 +109,3 @@
with_dict: "{{ haproxy_services }}"
notify:
- "Restart {{ item.key }} container"

(next file)
@@ -13,7 +13,7 @@
with_dict: "{{ horizon_services }}"
- set_fact:
custom_policy: []
- include: policy_item.yml
vars:
@@ -131,4 +131,3 @@
- horizon.enabled | bool
notify:
- Restart horizon container

(next file)
@@ -15,67 +15,67 @@
run_once: true
- block:
- name: Stop MariaDB containers
kolla_docker:
name: "{{ mariadb_service.container_name }}"
action: "stop_container"
- name: Run MariaDB wsrep recovery
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
environment:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
BOOTSTRAP_ARGS: "--wsrep-recover"
image: "{{ mariadb_service.image }}"
labels:
BOOTSTRAP:
name: "{{ mariadb_service.container_name }}"
restart_policy: "never"
volumes: "{{ mariadb_service.volumes }}"
- name: Stop MariaDB containers
kolla_docker:
name: "{{ mariadb_service.container_name }}"
action: "stop_container"
- name: Copying MariaDB log file to /tmp
shell: "docker cp {{ mariadb_service.container_name }}:/var/log/kolla/mariadb/mariadb.log /tmp/mariadb_tmp.log"
- name: Get MariaDB wsrep recovery seqno
shell: "tail -n 200 /tmp/mariadb_tmp.log | grep Recovered | tail -1 | awk '{print $7}' | awk -F'\n' '{print $1}' | awk -F':' '{print $2}'"
register: wsrep_recovery_seqno
- name: Removing MariaDB log file from /tmp
file: path=/tmp/mariadb_tmp.log state=absent
changed_when: false
check_mode: no
- name: Registering MariaDB seqno variable
set_fact:
seqno: "{{ wsrep_recovery_seqno.stdout_lines[0] }}"
changed_when: false
- name: Comparing seqno value on all mariadb hosts
shell:
cmd: |
if [[ ! -z {{ hostvars[inventory_hostname]['seqno'] }} && ! -z {{ hostvars[item]['seqno'] }} &&
{{ hostvars[inventory_hostname]['seqno'] }} =~ ^[0-9]+$ && {{ hostvars[item]['seqno'] }} =~ ^[0-9]+$ &&
{{ hostvars[inventory_hostname]['seqno'] }} -lt {{ hostvars[item]['seqno'] }} ]]; then echo {{ hostvars[item]['seqno'] }}; fi
with_items: "{{ groups['mariadb'] }}"
register: seqno_compare
args:
executable: /bin/bash
changed_when: false
- name: Writing hostname of host with the largest seqno to temp file
local_action: copy content={{ inventory_hostname }} dest=/tmp/kolla_mariadb_recover_inventory_name mode=0644
changed_when: false
when: seqno_compare.results | map(attribute='stdout') | join('') == ""
- name: Registering mariadb_recover_inventory_name from temp file
set_fact:
mariadb_recover_inventory_name: "{{ lookup('file', '/tmp/kolla_mariadb_recover_inventory_name') }}"
when:
- mariadb_recover_inventory_name is not defined

(next file)
@@ -37,4 +37,3 @@
- service.enabled | bool
- action != "config"
notify: Restart memcached container

(next file)
@@ -86,4 +86,3 @@
group: "{{ config_owner_group }}"
when: inventory_hostname in groups['compute']
with_sequence: start=1 end={{ num_nova_fake_per_node }}

(next file)
@@ -436,4 +436,3 @@
with_dict: "{{ neutron_services }}"
notify:
- "Restart {{ item.key }} container"

(next file)
@@ -111,7 +111,7 @@ nova_services:
group: "compute"
image: "{{ nova_compute_image_full }}"
environment:
LIBGUESTFS_BACKEND: "direct"
privileged: True
enabled: "{{ not enable_nova_fake | bool }}"
ipc_mode: "host"

(next file)
@@ -1,3 +1,4 @@
+---
- name: Install package python-os-xenapi
package:
name: python-os-xenapi

(next file)
@@ -203,4 +203,3 @@
with_dict: "{{ nova_services }}"
notify:
- "Restart {{ item.key }} container"

(next file)
@@ -70,4 +70,3 @@
with_dict: "{{ openvswitch_services }}"
notify:
- "Restart {{ item.key }} container"

(next file)
@@ -50,7 +50,7 @@ ovsdpdk_services:
####################
ovs_bridge_mappings: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
ovs_port_mappings: "{% for bridge in neutron_bridge_name.split(',') %} {{ neutron_external_interface.split(',')[loop.index0] }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
-dpdk_tunnel_interface : "{{neutron_external_interface}}"
+dpdk_tunnel_interface: "{{neutron_external_interface}}"
dpdk_tunnel_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + dpdk_tunnel_interface]['ipv4']['address'] }}"
tunnel_interface_network: "{{ hostvars[inventory_hostname]['ansible_' + dpdk_tunnel_interface]['ipv4']['network']}}/{{hostvars[inventory_hostname]['ansible_' + dpdk_tunnel_interface]['ipv4']['netmask']}}"
tunnel_interface_cidr: "{{dpdk_tunnel_interface_address}}/{{ tunnel_interface_network | ipaddr('prefix') }}"

(next file)
@@ -50,7 +50,7 @@
or inventory_hostname in groups['neutron-l3-agent']
or inventory_hostname in groups['neutron-metadata-agent']
or inventory_hostname in groups['neutron-vpnaas-agent'])
- ovs_physical_port_policy == 'indexed'
- name: Restart ovsdpdk-vswitchd container
vars:
@@ -86,7 +86,7 @@
or inventory_hostname in groups['neutron-l3-agent']
or inventory_hostname in groups['neutron-metadata-agent']
or inventory_hostname in groups['neutron-vpnaas-agent'])
- ovs_physical_port_policy == 'named'
- name: wait for dpdk tunnel ip
wait_for:

(next file)
@@ -19,7 +19,7 @@
- item.value.host_in_groups | bool
with_dict: "{{ ovsdpdk_services }}"
notify:
- "Restart {{ item.key }} container"
- name: Copying ovs-dpdkctl tool
copy:

(next file)
@@ -8,4 +8,3 @@
- item.value.enabled | bool
- item.value.host_in_groups | bool
with_dict: "{{ ovsdpdk_services }}"

(next file)
@@ -6,9 +6,9 @@
port: "{{ database_port }}"
with_items: "{{ groups['mariadb'] }}"
when:
- not enable_mariadb | bool
- enable_external_mariadb_load_balancer | bool
- inventory_hostname in groups['haproxy']
- name: "Check if external database address is reachable from all hosts"
wait_for:
@@ -16,5 +16,5 @@
host: "{{ database_address }}"
port: "{{ database_port }}"
when:
- not enable_mariadb | bool
- not enable_external_mariadb_load_balancer | bool

(next file)
@@ -10,10 +10,8 @@
key: "{{ config_owner_group }}"
register: getent_group
-#(duonghq) it's only a basic check, should be refined later
+# NOTE(duonghq): it's only a basic check, should be refined later
- name: Check if ansible user can do passwordless sudo
shell: sudo -n true
register: result
failed_when: result | failed

(next file)
@@ -39,10 +39,10 @@
- inventory_hostname in groups[service.group]
- service.enabled | bool
with_items:
- "rabbitmq-env.conf"
- "rabbitmq.config"
- "rabbitmq-clusterer.config"
- "definitions.json"
notify:
- Restart rabbitmq container

(next file)
@@ -34,4 +34,3 @@ skydive_analyzer_image_full: "{{ skydive_analyzer_image }}:{{ skydive_analyzer_t
skydive_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-skydive-agent"
skydive_agent_tag: "{{ openstack_release }}"
skydive_agent_image_full: "{{ skydive_agent_image }}:{{ skydive_agent_tag }}"

(next file)
@@ -41,4 +41,3 @@
- config_json.changed | bool
or skydive_conf.changed |bool
or skydive_agent_container.changed | bool

(next file)
@@ -31,4 +31,3 @@
- container_facts['skydive_agent'] is not defined
- inventory_hostname in groups[skydive_agent.group]
- skydive_agent.enabled | bool

(next file)
@@ -1,4 +1,3 @@
---
- name: Stopping Kolla containers
command: /tmp/kolla-stop/tools/stop-containers

(next file)
@@ -121,8 +121,8 @@ vitrage_datasource:
enabled: "{{ enable_cinder | bool }}"
- name: "neutron.network,neutron.port"
enabled: "{{ enable_neutron | bool }}"
-#TODO(egonzalez) Heat cannot be used with default policy.json due stacks:global_index=rule:deny_everybody.
+# TODO(egonzalez) Heat cannot be used with default policy.json due stacks:global_index=rule:deny_everybody.
# Document process to deploy vitrage+heat.
- name: "heat.stack"
enabled: "no"

(next file)
@@ -4,7 +4,7 @@ kind: ReplicationController
metadata:
name: redis
spec:
replicas: 2
selector:
name: redis
template:
@@ -13,16 +13,16 @@ spec:
name: redis
spec:
containers:
- name: redis
image: kubernetes/redis:v1
ports:
- containerPort: 6379
resources:
limits:
cpu: "1"
volumeMounts:
- mountPath: /redis-master-data
name: data
volumes:
- name: data
emptyDir: {}

(next file)
@@ -8,8 +8,8 @@ metadata:
name: redis-proxy
spec:
containers:
- name: proxy
image: kubernetes/redis-proxy:v1
ports:
- containerPort: 6379
name: api

(next file)
@@ -4,7 +4,7 @@ kind: ReplicationController
metadata:
name: redis-sentinel
spec:
replicas: 2
selector:
redis-sentinel: "true"
template:
@@ -15,10 +15,10 @@ spec:
role: sentinel
spec:
containers:
- name: sentinel
image: kubernetes/redis:v1
env:
- name: SENTINEL
value: "true"
ports:
- containerPort: 26379

(next file)
@@ -179,7 +179,7 @@ vitrage_keystone_password:
memcache_secret_key:
-#HMAC secret key
+# HMAC secret key
osprofiler_secret:
nova_ssh_key:

(next file)
@@ -1,5 +1,5 @@
---
features:
- Add ansible role for openstack congress project which provide
policy as a service across any collection of cloud services in
order to offer governance and compliance for dynamic infrastructures.

(next file)
@@ -1,3 +1,3 @@
---
features:
- Add designate-producer ansible role. Orchestrates periodic tasks that are run by designate.

(next file)
@@ -1,6 +1,6 @@
---
features:
- Introduce OpenStack Infrastructure Optimization
service, also known as Watcher. This project makes
use of Ceilometer data to rebalance the cloud to
meet declared goals and strategies.

(next file)
@@ -1,4 +1,4 @@
---
other:
- Congress doesn't work correctly out of the box and will
-not deploy. See Bug #1634641.
+not deploy. See Bug https://bugs.launchpad.net/kolla-ansible/+bug/1634641.

(next file)
@@ -6,4 +6,3 @@ deprecations:
* /etc/kolla/config/database.conf
* /etc/kolla/config/messaging.conf

(next file)
@@ -2,4 +2,3 @@
deprecations:
- The nova-network was deprecated, we remove it from the nova ansible
role.

(next file)
@@ -1,3 +1,4 @@
+---
- hosts: all
vars:
logs_dir: "/tmp/logs"

(next file)
@@ -32,8 +32,8 @@
become: true
tasks:
- name: Create log directory for node
file:
state: directory
path: /tmp/{{ inventory_hostname }}
become: false

(next file)
@@ -30,11 +30,15 @@ setenv = VIRTUAL_ENV={envdir}
commands = python setup.py testr --coverage --testr-args='{posargs}'
[testenv:pep8]
+deps =
+{[testenv]deps}
+yamllint
commands =
{toxinidir}/tools/run-bashate.sh
flake8 {posargs}
python {toxinidir}/tools/validate-all-file.py
bandit -r ansible kolla_ansible tests tools
+yamllint .
[testenv:bandit]
commands = bandit -r ansible kolla_ansible tests tools