Adopt use of pre-commit linting

Follows the same configuration that was used on
tripleo-quickstart-extras and documented use on tripleo-docs.

Change-Id: Iba8a2db92137f9f6ad28f498627eb1b87039d99f
Story: https://tree.taiga.io/project/tripleo-ci-board/task/381
This commit is contained in:
Sorin Sbarnea
2018-12-06 11:11:42 +00:00
parent 40b50f763c
commit ed27a979d5
35 changed files with 396 additions and 294 deletions

18
.ansible-lint Normal file
View File

@@ -0,0 +1,18 @@
# ansible-lint configuration, consumed by the pre-commit ansible-lint hook.
# Paths listed under exclude_paths are never linted.
exclude_paths:
- roles/validate-ui/.travis.yml
# Emit machine-parseable (pep8-style) output for CI consumption.
parseable: true
# Project-local custom lint rules live here.
rulesdir:
- ./ci-scripts/ansible_rules/
quiet: false
# Rules intentionally skipped; each entry keeps the upstream rule title
# plus the project-specific reason for skipping it.
skip_list:
- ANSIBLE0006 # Using command rather than module we have a few use cases
# where we need to use curl and rsync
- ANSIBLE0007 # Using command rather than an argument to e.g file
# we have a lot of 'rm' command and we should use file module instead
- ANSIBLE0010 # Package installs should not use latest.
# Sometimes we need to update some packages.
- ANSIBLE0012 # Commands should not change things if nothing needs doing
- ANSIBLE0013 # Use Shell only when shell functionality is required
- ANSIBLE0016 # Tasks that run when changed should likely be handlers
# this requires refactoring roles, skipping for now
verbosity: 1

42
.pre-commit-config.yaml Normal file
View File

@@ -0,0 +1,42 @@
---
# pre-commit hook configuration (https://pre-commit.com).
# Install locally with `pre-commit install`; run on demand with
# `pre-commit run --all-files` (also driven by tox -e linters).
repos:
# Generic hygiene checks plus flake8 (pinned to the OpenStack hacking
# style plugin) from the upstream pre-commit-hooks collection.
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.0.0
hooks:
- id: trailing-whitespace
- id: mixed-line-ending
- id: check-byte-order-marker
- id: check-executables-have-shebangs
- id: check-merge-conflict
- id: debug-statements
- id: flake8
additional_dependencies:
- hacking<1.2.0,>=1.1.0
- id: check-yaml
files: .*\.(yaml|yml)$
# commented to allow progressive enablement in smaller patches
# - repo: https://github.com/adrienverge/yamllint.git
#   rev: v1.13.0
#   hooks:
#     - id: yamllint
#       files: \.(yaml|yml)$
#       types: [file, yaml]
#       entry: yamllint --strict -f parsable
# Ansible playbook/role linting; rule skips are configured in .ansible-lint.
- repo: https://github.com/ansible/ansible-lint
rev: v3.5.1
hooks:
- id: ansible-lint
files: \.(yaml|yml)$
entry: ansible-lint --force-color -v
# Shell script style checking via bashate.
- repo: https://github.com/openstack-dev/bashate.git
rev: 0.6.0
hooks:
- id: bashate
entry: bashate --error . --verbose --ignore=E006,E040
# Run bashate check for all bash scripts
# Ignores the following rules:
# E006: Line longer than 79 columns (as many scripts use jinja
# templating, this is very difficult)
# E040: Syntax error determined using `bash -n` (as many scripts
# use jinja templating, this will often fail and the syntax
# error will be discovered in execution anyway)

6
.yamllint Normal file
View File

@@ -0,0 +1,6 @@
---
# yamllint configuration: start from the upstream defaults and relax
# only the line-length limit (CI templates routinely exceed 80 chars).
extends: default
rules:
line-length:
max: 180

View File

@@ -27,7 +27,7 @@ USER=centos
# makes some assumptions but good enough for now
nova keypair-add --pub-key ~/.ssh/id_rsa.pub bighammer || true
function tapper(){
function tapper {
set -x
NODENAME=test-node-$1

View File

@@ -1,3 +1,4 @@
#!/bin/bash
# Tripleo CI functions
# Revert a commit for tripleo ci
@@ -5,7 +6,7 @@
# $2 : hash id of commit to revert
# $3 : bug id of reason for revert (used to skip revert if found in commit
# that triggers ci).
function temprevert(){
function temprevert {
# Before reverting check to ensure this isn't the related fix
if git --git-dir=$TRIPLEO_ROOT/${ZUUL_PROJECT#*/}/.git log -1 | grep -iE "bug.*$3" ; then
echo "Skipping temprevert because bug fix $3 was found in git message."
@@ -24,7 +25,7 @@ function temprevert(){
# $2 : hash id of commit to pin too
# $3 : bug id of reason for the pin (used to skip revert if found in commit
# that triggers ci).
function pin(){
function pin {
# Before reverting check to ensure this isn't the related fix
if git --git-dir=$TRIPLEO_ROOT/${ZUUL_PROJECT#*/}/.git log -1 | grep -iE "bug.*$3" ; then
echo "Skipping pin because bug fix $3 was found in git message."
@@ -42,7 +43,7 @@ function pin(){
# $2 : Gerrit refspec to cherry pick
# $3 : bug id of reason for the cherry pick (used to skip cherry pick if found
# in commit that triggers ci).
function cherrypick(){
function cherrypick {
local PROJ_NAME=$1
local REFSPEC=$2
@@ -66,14 +67,14 @@ function cherrypick(){
# echo's out a project name from a ref
# $1 : e.g. openstack/nova:master:refs/changes/87/64787/3 returns nova
function filterref(){
function filterref {
PROJ=${1%%:*}
PROJ=${PROJ##*/}
echo $PROJ
}
# Mount a qcow image, copy in the delorean repositories and update the packages
function update_image(){
function update_image {
IMAGE=$1
MOUNTDIR=$(mktemp -d)
case ${IMAGE##*.} in
@@ -133,7 +134,7 @@ function update_image(){
# Decide if a particular cached artifact can be used in this CI test
# Takes a single argument representing the name of the artifact being checked.
function canusecache(){
function canusecache {
# If we are uploading to the cache then we shouldn't use it
[ "$CACHEUPLOAD" == 1 ] && return 1
@@ -165,7 +166,7 @@ function canusecache(){
return 0
}
function extract_logs(){
function extract_logs {
local name=$1
mkdir -p $WORKSPACE/logs/$name
local logs_tar="$WORKSPACE/logs/$name.tar.xz"
@@ -178,7 +179,7 @@ function extract_logs(){
fi
}
function postci(){
function postci {
local exit_val=${1:-0}
set -x
set +e
@@ -371,7 +372,7 @@ function stop_dstat {
ps axjf | grep bin/dstat | grep -v grep | awk '{print $2;}' | sudo xargs -t -n 1 -r kill
}
function item_in_array () {
function item_in_array {
local item
for item in "${@:2}"; do
if [[ "$item" == "$1" ]]; then

View File

@@ -1,3 +1,4 @@
#!/bin/bash
# Periodic stable jobs set OVERRIDE_ZUUL_BRANCH, gate stable jobs
# just have the branch they're proposed to, e.g ZUUL_BRANCH, in both
# cases we need to set STABLE_RELEASE to match for tripleo.sh

View File

@@ -4,11 +4,11 @@ from __future__ import print_function
import argparse
import difflib
import json
import requests
import os
import requests
from colorama import init
from colorama import Fore
from colorama import init
GERRIT_DETAIL_API = "https://review.openstack.org/changes/{}/detail"
GERRIT_USER_NAME = "zuul"
@@ -16,8 +16,9 @@ ZUUL_PIPELINE = "check"
def parse_ci_message(message):
"""Convert zuul's gerrit message into a dict with job name as key and
job url as value
"""Convert zuul's gerrit message into a dict
Dictionary contains job name as key and job url as value
"""
jobs = {}
@@ -29,8 +30,7 @@ def parse_ci_message(message):
def get_file(logs_url, file):
"""Download a file from logs server for this job
"""
"""Download a file from logs server for this job"""
response = requests.get(logs_url + '/logs/' + file)
if response.ok:
@@ -39,8 +39,7 @@ def get_file(logs_url, file):
def get_last_jobs(change):
"""Get the last CI jobs execution at check pipeline for this review
"""
"""Get the last CI jobs execution at check pipeline for this review"""
last_jobs = {}
detail_url = GERRIT_DETAIL_API.format(change)
@@ -62,8 +61,9 @@ def get_last_jobs(change):
def download(jobs, file_path):
"""Download a file from all the specified jobs and return them as a
dictionary with job name as key and file content as value
"""Download a file from all the specified jobs
Return them as a dictionary with job name as key and file content as value
"""
downloaded_files = {}
for job, logs in jobs.iteritems():
@@ -76,9 +76,7 @@ def download(jobs, file_path):
def is_equal(lho_jobs, rho_jobs, file_path):
"""Check the differences of file_path between the lho and rho job sets and
print out them
"""
"""Prints differences of file_path between the lho and rho job sets"""
lho_files = download(lho_jobs, file_path)
rho_files = download(rho_jobs, file_path)

View File

@@ -1,3 +1,4 @@
#!/bin/bash
set -eux
set -o pipefail

View File

@@ -6,7 +6,7 @@ set -o pipefail
TMPFILE=$(mktemp)
TMP2FILE=$(mktemp)
function heat_resource_metadata() {
function heat_resource_metadata {
# Build os-collect-config command line arguments for the given heat
# resource, which when run, allow us to collect the heat completion
# signals.

View File

@@ -1,7 +1,7 @@
#!/bin/bash
set -eu -o pipefail
function usage(){
function usage {
echo "Helper script for downloading tripleo-ci jobs logs"
echo
echo "Example:"
@@ -10,7 +10,7 @@ function usage(){
echo "Downloads the logs and starts a shell from the logs root directory"
}
function finish(){
function finish {
rc=${rc:-$?}
trap - EXIT
cd $TDIR/../
@@ -18,7 +18,7 @@ function finish(){
PS1="JOBLOGS ]\$ " bash --noprofile --norc
}
function get_dirs(){
function get_dirs {
local drop="\b(etc|ara|ara_oooq|docs|build|stackviz|sudoers.d|config-data|extra)\b"
local directories=""
directories=$(curl -s "$1" 2> /dev/null | grep -E "\[DIR" | grep -vE "${drop}" | sed -e "s,.*href=\"\([^\"]*\)\".*,${1}\1,g")

View File

@@ -1,3 +1,4 @@
#!/bin/bash
export METRICS_START_TIMES=/tmp/metric-start-times
export METRICS_DATA_FILE=/tmp/metrics-data
@@ -17,9 +18,12 @@ function record_metric {
# called. NOTE: time metrics names must be unique.
function start_metric {
local NAME=$1
local START_TIME=$(date +%s)
local METRIC_NAME
local START_TIME
START_TIME=$(date +%s)
# we use : as our delimiter so convert to _. Also convert spaces and /'s.
local METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
if grep -c "^$METRIC_NAME:" $METRICS_START_TIMES &>/dev/null; then
echo "start_metric has already been called for $NAME" >&2
@@ -33,18 +37,23 @@ function start_metric {
# The total time (in seconds) is calculated and logged to the metrics
# data file. NOTE: the end time is used as the DTS.
function stop_metric {
local END_TIME
local LINE
local METRIC_NAME
local NAME=$1
local METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
local END_TIME=$(date +%s)
local START_TIME
local TOTAL_TIME
METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
END_TIME=$(date +%s)
if ! grep -c "^$METRIC_NAME" $METRICS_START_TIMES &>/dev/null; then
echo "Please call start_metric before calling stop_metric for $NAME" >&2
exit 1
fi
local LINE=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES)
local START_TIME=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES | cut -d ':' -f '2')
local TOTAL_TIME="$(($END_TIME - $START_TIME))"
LINE=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES)
START_TIME=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES | cut -d ':' -f '2')
TOTAL_TIME="$(($END_TIME - $START_TIME))"
record_metric "$METRIC_NAME" "$TOTAL_TIME" "$END_TIME"
}
function metrics_to_graphite {

View File

@@ -1,16 +1,18 @@
#!/bin/python3.4
from builtins import FileExistsError
import cgi
import cgitb
import fcntl
import os
import shutil
import sys
import tempfile
basedir="/var/www/html/"
basedir = "/var/www/html/"
print("Content-Type: text/html\n")
def saveform(form, storagedir):
for key in form.keys():
entry = form[key]
@@ -36,6 +38,7 @@ def saveform(form, storagedir):
fp.write(line)
fp.close()
def run():
if not os.environ.get("REMOTE_ADDR", "").startswith("192.168."):
@@ -73,5 +76,5 @@ def run():
fcntl.lockf(fd, fcntl.LOCK_UN)
os.close(fd)
sys.exit(run())
sys.exit(run())

View File

@@ -65,7 +65,7 @@ function is_featureset {
local type="${1}"
local featureset_file="${2}"
[ $(shyaml get-value "${type}" "False"< "${featureset_file}") = "True" ]
[[ $(shyaml get-value "${type}" "False"< "${featureset_file}") = "True" ]]
}
function run_with_timeout {

View File

@@ -21,7 +21,7 @@ set -x
# NOTE(bnemec): This function starts the port deletions in the background.
# To ensure they complete before you proceed, you must call "wait" after
# calling this function.
function delete_ports() {
function delete_ports {
local subnetid=${1:-}
if [ -z "$subnetid" ]; then
return
@@ -37,8 +37,7 @@ CONSOLE_LOG_PATH=/var/www/html/tebroker/console-logs/
nova console-log bmc-${ENVNUM} | tail -n 100 | awk -v envnum="$ENVNUM" '$0=envnum ": " $0' >> /var/log/bmc-console-logs
# Save all the consoles in the stack to a dedicated directory, stripping out ANSI color codes.
for server in $(openstack server list -f value -c Name | grep baremetal-${ENVNUM}) bmc-$ENVNUM
do
for server in $(openstack server list -f value -c Name | grep baremetal-${ENVNUM}) bmc-$ENVNUM ; do
openstack console log show $server | sed 's/\[[0-9;]*[a-zA-Z]//g' | gzip > $CONSOLE_LOG_PATH/$server-console.log.gz || true
done
@@ -55,7 +54,7 @@ wait
# If there was a keypair for this specific run, delete it.
openstack keypair delete "tripleo-ci-key-$ENVNUM" || true
function delete_stack() {
function delete_stack {
local stackname=$1
# Nothing to do if the specified stack doesn't exist
if ! heat stack-show $stackname; then

View File

@@ -23,8 +23,8 @@ import json
import logging
import logging.handlers
import os
import sys
import subprocess
import sys
import tempfile
import threading
import time
@@ -35,9 +35,11 @@ from novaclient import client as novaclient
from novaclient import exceptions
# 100Mb log files
maxBytes=1024*1024*100
maxBytes = 1024*1024*100
logging.basicConfig(filename="/var/www/html/tebroker/testenv-worker.log", format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.basicConfig(
filename="/var/www/html/tebroker/testenv-worker.log",
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class CallbackClient(gear.Client):
@@ -86,7 +88,7 @@ class TEWorkerThread(threading.Thread):
self.runJob()
except gear.InterruptedError:
logger.info('getJob interrupted...')
except:
except Exception:
logger.exception('Error while run_te_worker worker')
self.running = False
@@ -132,14 +134,15 @@ class TEWorkerThread(threading.Thread):
with tempfile.NamedTemporaryFile('r') as fp:
os.environ["TE_DATAFILE"] = fp.name
logger.info(
subprocess.check_output([self.scriptfiles[0],
subprocess.check_output([
self.scriptfiles[0],
self.num,
arguments.get("envsize","2"),
arguments.get("ucinstance",""),
arguments.get("envsize", "2"),
arguments.get("ucinstance", ""),
arguments.get("create_undercloud", ""),
arguments.get("ssh_key", ""),
arguments.get("net_iso", "multi-nic"),
arguments.get("compute_envsize","0"),
arguments.get("compute_envsize", "0"),
arguments.get("extra_nodes", "0"),
],
stderr=subprocess.STDOUT))
@@ -164,7 +167,8 @@ class TEWorkerThread(threading.Thread):
if not cb_job.running:
logger.error("No sign of the Callback job starting,"
"assuming its no longer present")
clientdata = subprocess.check_output([self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
clientdata = subprocess.check_output(
[self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
logger.info(clientdata)
client.shutdown()
return
@@ -182,7 +186,8 @@ class TEWorkerThread(threading.Thread):
else:
logger.info('Returned from Job : %s', cb_job.data)
try:
clientdata = subprocess.check_output([self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
clientdata = subprocess.check_output(
[self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logger.error(e.output)
raise
@@ -238,7 +243,7 @@ def _check_instance_alive(nclient, instance, event):
"""
if instance:
try:
i = nclient.servers.get(instance)
nclient.servers.get(instance)
except exceptions.NotFound:
# There is a very brief period of time where instance could be set
# and event not. It's unlikely to happen, but let's be safe.
@@ -254,7 +259,9 @@ def main(args=sys.argv[1:]):
'"locked" state while it calls back to the client. The '
'clients job is provided with data (contents of datafile)'
)
parser.add_argument('scriptfiles', nargs=2,
parser.add_argument(
'scriptfiles',
nargs=2,
help='Path to a script whos output is provided to the client')
parser.add_argument('--timeout', '-t', type=int, default=10800,
help='The maximum number of seconds to hold the '
@@ -271,7 +278,10 @@ def main(args=sys.argv[1:]):
global logger
logger = logging.getLogger('testenv-worker-' + opts.tenum)
logger.addHandler(logging.handlers.RotatingFileHandler("/var/www/html/tebroker/testenv-worker.log", maxBytes=maxBytes, backupCount=5))
logger.addHandler(logging.handlers.RotatingFileHandler(
"/var/www/html/tebroker/testenv-worker.log",
maxBytes=maxBytes,
backupCount=5))
logger.setLevel(logging.INFO)
logger.removeHandler(logger.handlers[0])
@@ -279,7 +289,11 @@ def main(args=sys.argv[1:]):
logger.setLevel(logging.DEBUG)
logger.info('Starting test-env worker with data %r', opts.scriptfiles)
te_worker = TEWorkerThread(opts.geard, opts.tenum, opts.timeout, opts.scriptfiles)
te_worker = TEWorkerThread(
opts.geard,
opts.tenum,
opts.timeout,
opts.scriptfiles)
te_worker.start()

View File

@@ -9,8 +9,7 @@ function set_env {
# The updates job already takes a long time, always use cache for it
[[ "$TOCI_JOBTYPE" =~ updates ]] && set_env "false"
# There are some projects that require images building
for PROJFULLREF in ${ZUUL_CHANGES//^/ };
do
for PROJFULLREF in ${ZUUL_CHANGES//^/ }; do
PROJ=${PROJFULLREF%%:*};
PROJ=${PROJ##*/};
[[ "$PROJ" =~ diskimage-builder|tripleo-image-elements|tripleo-puppet-elements|instack-undercloud|python-tripleoclient|tripleo-common ]] && set_env "true"

View File

@@ -250,7 +250,11 @@ function log {
}
function source_rc {
if [ $1 = "stackrc" ] ; then cloud="Undercloud"; else cloud="Overcloud"; fi
if [ $1 = "stackrc" ]; then
cloud="Undercloud"
else
cloud="Overcloud"
fi
echo "You must source a $1 file for the $cloud."
echo "Attempting to source $HOME/$1"
source $HOME/$1
@@ -665,8 +669,7 @@ function overcloud_deploy {
exitval=0
log "Deploy command arguments: $OVERCLOUD_DEPLOY_ARGS"
openstack overcloud deploy $OVERCLOUD_DEPLOY_ARGS || exitval=1
if [ $exitval -eq 1 ];
then
if [ $exitval -eq 1 ]; then
log "Overcloud create - FAILED!"
exit 1
fi
@@ -713,8 +716,7 @@ function overcloud_update {
log "Overcloud update started."
exitval=0
openstack overcloud deploy $OVERCLOUD_UPDATE_ARGS || exitval=1
if [ $exitval -eq 1 ];
then
if [ $exitval -eq 1 ]; then
log "Overcloud update - FAILED!"
exit 1
fi

View File

@@ -1,5 +1,6 @@
---
parameter_defaults:
ControlPlaneSubnetCidr: "24"
ControlPlaneDefaultRoute: 192.168.24.1
EC2MetadataIp: 192.168.24.1
DnsServers: ["8.8.8.8","8.8.4.4"]
DnsServers: ["8.8.8.8", "8.8.4.4"]

View File

@@ -1,3 +1,4 @@
---
parameter_defaults:
ControllerSchedulerHints:
'capabilities:node': 'controller-%index%'

View File

@@ -1,8 +1,8 @@
flake8
pytest
pytest-html
pytest-cov
mock
requests
pprint
pre-commit>=1.10 # MIT License
PyYAML

View File

@@ -21,9 +21,9 @@
import argparse
import json
import logging
import sys
import subprocess
import os
import subprocess
import sys
import tempfile
import textwrap
import threading
@@ -65,7 +65,8 @@ class TestCallback(object):
if time_waiting > 90:
logger.warn('%.1f seconds waiting for a worker.' % (time_waiting))
if "Couldn't retrieve env" in job.arguments or "Failed creating OVB stack" in job.arguments:
if "Couldn't retrieve env" in job.arguments or \
"Failed creating OVB stack" in job.arguments:
logger.error(job.arguments)
self.rv = 2
job.sendWorkComplete("")
@@ -80,7 +81,7 @@ class TestCallback(object):
try:
self.rv = subprocess.call(self.command)
except:
except Exception:
logger.exception("Error calling command")
self.rv = 2
@@ -195,14 +196,14 @@ def main(args=sys.argv[1:]):
job_params = {
"callback_name": callback_name,
"timeout": opts.timeout,
"envsize":opts.envsize,
"compute_envsize":opts.compute_envsize,
"ucinstance":opts.ucinstance,
"envsize": opts.envsize,
"compute_envsize": opts.compute_envsize,
"ucinstance": opts.ucinstance,
"create_undercloud": "true" if opts.create_undercloud else "",
"ssh_key":opts.ssh_key,
"net_iso":opts.net_iso,
"extra_nodes":opts.extra_nodes,
"job_identifier":job_identifier,
"ssh_key": opts.ssh_key,
"net_iso": opts.net_iso,
"extra_nodes": opts.extra_nodes,
"job_identifier": job_identifier,
}
job = gear.Job('lockenv', json.dumps(job_params))
client.submitJob(job)
@@ -227,5 +228,6 @@ def main(args=sys.argv[1:]):
logger.debug("Exiting with status : %d", cb.rv)
return cb.rv
if __name__ == '__main__':
exit(main())

View File

@@ -1,3 +1,4 @@
---
# Collect logs settings
# artcl_tar_gz: true

View File

@@ -1,3 +1,4 @@
---
# TRIPLEO-CI environment settings
undercloud_user: "{{ lookup('env','USER') }}"
non_root_user: "{{ undercloud_user }}"

View File

@@ -1,3 +1,4 @@
---
undercloud_type: ovb
use_testenv_broker: true
build_test_packages: true

View File

@@ -18,7 +18,8 @@ commands = pyflakes setup.py scripts
[testenv:linters]
basepython = python3
whitelist_externals = bash
commands = flake8 --max-line-length 80 {toxinidir} {posargs}
commands = python -m pre_commit run --source HEAD^ --origin HEAD
# deprecated: use linters instead. kept only as a convenience alias
[testenv:pep8]

View File

@@ -1,3 +1,4 @@
---
- nodeset:
name: two-centos-7-nodes
nodes: