Update linting jobs

The linters job did not run any tests at all since there was no
linters environment and the default environment only removes all pyc
files.

Add a linters environment that runs ansible-lint and syntax tests.

Remove the ansible-lint-jobs template; it's no longer needed since the
linters jobs are enough.

The legacy ansible-linters jobs failed to detect some problems; mark
them with noqa for now.

Change-Id: Ibfa5ae179a98c57df2151cc633eb849ec8359a95
This commit is contained in:
Andreas Jaeger 2019-05-12 20:10:40 +02:00
parent a052773a2f
commit ffe104c743
8 changed files with 24 additions and 16 deletions

View File

@ -14,7 +14,7 @@
become: yes
- name: Listen for IPSEC connections
shell: ipsec whack --listen
shell: ipsec whack --listen # noqa 305
become: yes
ignore_errors: true
when:

View File

@ -5,40 +5,40 @@
# have some variables provided that will help us do this in a better way.
- name: Get the current node's internal API IP address
shell: grep "{{ ansible_hostname }}.internalapi$" /etc/hosts | awk '{print $1}'
shell: grep "{{ ansible_hostname }}.internalapi$" /etc/hosts | awk '{print $1}' # noqa 306
register: current_internalapi_ip_register
changed_when: false
# This outputs the hosts as lines consisting of <fqdn>,<IP>
- name: Get the controller internal API hostnames and IP addresses
shell: grep "{{ overcloud_controller_identifier }}.*internalapi$" /etc/hosts | awk '{print $3","$1}'
shell: grep "{{ overcloud_controller_identifier }}.*internalapi$" /etc/hosts | awk '{print $3","$1}' # noqa 306
register: internalapi_controllers_register
changed_when: false
- name: Get the current node's storage IP address
shell: grep "{{ ansible_hostname }}.storage$" /etc/hosts | awk '{print $1}'
shell: grep "{{ ansible_hostname }}.storage$" /etc/hosts | awk '{print $1}' # noqa 306
register: current_storage_ip_register
changed_when: false
# This outputs the hosts as lines consisting of <fqdn>,<IP>
- name: Get the controller storage hostnames and IP addresses
shell: grep "{{ overcloud_controller_identifier }}.*storage$" /etc/hosts | awk '{print $3","$1}'
shell: grep "{{ overcloud_controller_identifier }}.*storage$" /etc/hosts | awk '{print $3","$1}' # noqa 306
register: storage_controllers_register
changed_when: false
- name: Get the current node's storagemgmt IP address
shell: grep "{{ ansible_hostname }}.storagemgmt$" /etc/hosts | awk '{print $1}'
shell: grep "{{ ansible_hostname }}.storagemgmt$" /etc/hosts | awk '{print $1}' # noqa 306
register: current_storagemgmt_ip_register
changed_when: false
# This outputs the hosts as lines consisting of <fqdn>,<IP>
- name: Get the controller storagemgmt hostnames and IP addresses
shell: grep "{{ overcloud_controller_identifier }}.*storagemgmt$" /etc/hosts | awk '{print $3","$1}'
shell: grep "{{ overcloud_controller_identifier }}.*storagemgmt$" /etc/hosts | awk '{print $3","$1}' # noqa 306
register: storagemgmt_controllers_register
changed_when: false
- name: Get the current node's ctlplane IP address
shell: grep "{{ ansible_hostname }}.ctlplane$" /etc/hosts | awk '{print $1}'
shell: grep "{{ ansible_hostname }}.ctlplane$" /etc/hosts | awk '{print $1}' # noqa 306
register: current_ctlplane_ip_register
changed_when: false

View File

@ -55,7 +55,7 @@
# We force the restart of IPSEC here since adding it as a handler was
# getting run between the loop above, which is not desirable.
- name: Force restart IPSEC
shell: ipsec restart
shell: ipsec restart # noqa 301 305
- meta: flush_handlers

View File

@ -23,7 +23,7 @@
# This returns the hostname (short) of the node hosting the VIP
- name: Determine which node is hosting the VIP
shell: pcs status | grep ip- | sed 's/ip-//' | awk '{print $1"\t"$4}' | grep "{{ networks[0]['vips'][0]['ip'] }}" | awk '{print $2}'
shell: pcs status | grep ip- | sed 's/ip-//' | awk '{print $1"\t"$4}' | grep "{{ networks[0]['vips'][0]['ip'] }}" | awk '{print $2}' # noqa 306
register: node_hosting_the_vip
when: pacemaker_running|bool
@ -42,7 +42,7 @@
# We force the restart of IPSEC here since adding it as a handler was
# getting run between the loop above, which is not desirable.
- name: Force restart IPSEC
shell: ipsec restart
shell: ipsec restart # noqa 301 305
# Permissions gotten from http://www.linux-ha.org/doc/dev-guides/_installing_and_packaging_resource_agents.html
- name: Install TripleO IPSEC resource agent

View File

@ -7,7 +7,7 @@
ignore_errors: true
- name: Delete IPSec pacemaker resource agents
shell: "pcs status | grep tripleo-ipsec | awk '{print $1}' | xargs -n1 pcs resource delete"
shell: "pcs status | grep tripleo-ipsec | awk '{print $1}' | xargs -n1 pcs resource delete" # noqa 306
when:
- pacemaker_running|bool
- node_running_ipsec_agent_register.rc == 0

3
test-requirements.txt Normal file
View File

@ -0,0 +1,3 @@
pbr>=1.6
ansible
ansible-lint

11
tox.ini
View File

@ -7,8 +7,8 @@ envlist = linters
usedevelop = True
install_command =
pip install -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} {opts} {packages}
#deps =
# -r{toxinidir}/test-requirements.txt
deps =
-r{toxinidir}/test-requirements.txt
commands =
/usr/bin/find . -type f -name "*.pyc" -delete
passenv =
@ -27,6 +27,13 @@ whitelist_externals =
setenv =
VIRTUAL_ENV={envdir}
[testenv:linters]
setenv =
{[testenv:ansible]setenv}
commands =
{[testenv:ansible-lint]commands}
{[testenv:ansible-syntax]commands}
[testenv:ansible]
#deps =
# {[testenv]deps}

View File

@ -1,6 +1,4 @@
- project:
templates:
- ansible-lint-jobs
check:
jobs:
- openstack-tox-linters