Browse Source

Adopt use of pre-commit linting

Follows the same configuration that was used on
tripleo-quickstart-extras and documented use on tripleo-docs.

Change-Id: Iba8a2db92137f9f6ad28f498627eb1b87039d99f
Story: https://tree.taiga.io/project/tripleo-ci-board/task/381
changes/02/623202/13
Sorin Sbarnea 6 months ago
parent
commit
ed27a979d5

+ 18
- 0
.ansible-lint View File

@@ -0,0 +1,18 @@
1
+exclude_paths:
2
+    - roles/validate-ui/.travis.yml
3
+parseable: true
4
+rulesdir:
5
+    - ./ci-scripts/ansible_rules/
6
+quiet: false
7
+skip_list:
8
+    - ANSIBLE0006  # Using command rather than module: we have a few use cases
9
+    # where we need to use curl and rsync
10
+    - ANSIBLE0007  # Using command rather than an argument to e.g. file
11
+    # we have a lot of 'rm' commands and we should use the file module instead
12
+    - ANSIBLE0010  # Package installs should not use latest.
13
+    # Sometimes we need to update some packages.
14
+    - ANSIBLE0012  # Commands should not change things if nothing needs doing
15
+    - ANSIBLE0013  # Use Shell only when shell functionality is required
16
+    - ANSIBLE0016  # Tasks that run when changed should likely be handlers
17
+    # this requires refactoring roles, skipping for now
18
+verbosity: 1

+ 42
- 0
.pre-commit-config.yaml View File

@@ -0,0 +1,42 @@
1
+---
2
+repos:
3
+  - repo: https://github.com/pre-commit/pre-commit-hooks
4
+    rev: v2.0.0
5
+    hooks:
6
+      - id: trailing-whitespace
7
+      - id: mixed-line-ending
8
+      - id: check-byte-order-marker
9
+      - id: check-executables-have-shebangs
10
+      - id: check-merge-conflict
11
+      - id: debug-statements
12
+      - id: flake8
13
+        additional_dependencies:
14
+          - hacking<1.2.0,>=1.1.0
15
+      - id: check-yaml
16
+        files: .*\.(yaml|yml)$
17
+  # commented to allow progressive enablement in smaller patches
18
+  # - repo: https://github.com/adrienverge/yamllint.git
19
+  #   rev: v1.13.0
20
+  #   hooks:
21
+  #     - id: yamllint
22
+  #       files: \.(yaml|yml)$
23
+  #       types: [file, yaml]
24
+  #       entry: yamllint --strict -f parsable
25
+  - repo: https://github.com/ansible/ansible-lint
26
+    rev: v3.5.1
27
+    hooks:
28
+      - id: ansible-lint
29
+        files: \.(yaml|yml)$
30
+        entry: ansible-lint --force-color -v
31
+  - repo: https://github.com/openstack-dev/bashate.git
32
+    rev: 0.6.0
33
+    hooks:
34
+      - id: bashate
35
+        entry: bashate --error . --verbose --ignore=E006,E040
36
+        # Run bashate check for all bash scripts
37
+        # Ignores the following rules:
38
+        # E006: Line longer than 79 columns (as many scripts use jinja
39
+        #       templating, this is very difficult)
40
+        # E040: Syntax error determined using `bash -n` (as many scripts
41
+        #       use jinja templating, this will often fail and the syntax
42
+        #       error will be discovered in execution anyway)

+ 6
- 0
.yamllint View File

@@ -0,0 +1,6 @@
1
+---
2
+extends: default
3
+
4
+rules:
5
+  line-length:
6
+    max: 180

+ 10
- 10
playbooks/tripleo-ci/templates/toci_quickstart.sh.j2 View File

@@ -151,16 +151,16 @@ done
151 151
     for playbook in {{ " ".join(playbooks) }}; do
152 152
         echo ${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}
153 153
         $QUICKSTART_INSTALL_CMD \
154
-           ${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG} \
155
-           {{ nodes_args }} \
156
-           {{ featureset_conf }} \
157
-           {{ env_vars }} \
158
-           {{ extra_vars }} \
159
-           {{ vxlan_vars }} \
160
-           $DEFAULT_ARGS \
161
-           --extra-vars @{{ workspace }}/logs/zuul-variables.yaml \
162
-            $LOCAL_WORKING_DIR/playbooks/$playbook ${PLAYBOOKS_ARGS[$playbook]:-} \
163
-            2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?
154
+            ${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG} \
155
+            {{ nodes_args }} \
156
+            {{ featureset_conf }} \
157
+            {{ env_vars }} \
158
+            {{ extra_vars }} \
159
+            {{ vxlan_vars }} \
160
+            $DEFAULT_ARGS \
161
+            --extra-vars @{{ workspace }}/logs/zuul-variables.yaml \
162
+                $LOCAL_WORKING_DIR/playbooks/$playbook ${PLAYBOOKS_ARGS[$playbook]:-} \
163
+                2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?
164 164
 
165 165
         # Print status of playbook run
166 166
         [[ "$exit_value" == 0 ]] && echo "Playbook run of $playbook passed successfully"

+ 3
- 3
scripts/bighammer.sh View File

@@ -27,7 +27,7 @@ USER=centos
27 27
 # makes some assumptions but good enough for now
28 28
 nova keypair-add --pub-key ~/.ssh/id_rsa.pub bighammer || true
29 29
 
30
-function tapper(){
30
+function tapper {
31 31
     set -x
32 32
     NODENAME=test-node-$1
33 33
 
@@ -35,8 +35,8 @@ function tapper(){
35 35
     #trap "nova delete $NODENAME" RETURN ERR
36 36
     sleep 60
37 37
     if [ "$(nova show $NODENAME | awk '/status/ {print $4}')" != "ACTIVE" ] ; then
38
-          nova show $NODENAME
39
-          return 1
38
+        nova show $NODENAME
39
+        return 1
40 40
     fi
41 41
 
42 42
     IP=$(nova show $NODENAME | awk '/private network/ {print $5}')

+ 26
- 26
scripts/bootstrap-overcloud-full.sh View File

@@ -51,30 +51,30 @@ except:
51 51
 export ELEMENTS_PATH="${COMMON_ELEMENTS_PATH}:/usr/share/instack-undercloud:/usr/share/tripleo-image-elements:/usr/share/tripleo-puppet-elements"
52 52
 ELEMENTS=$(\
53 53
 tripleo-build-images \
54
-  --image-json-output \
55
-  --image-name overcloud-full \
56
-  --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
57
-  --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
58
-  | jq '. | .[0].elements | map(.+" ") | add' \
59
-  | sed 's/"//g')
54
+    --image-json-output \
55
+    --image-name overcloud-full \
56
+    --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
57
+    --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
58
+    | jq '. | .[0].elements | map(.+" ") | add' \
59
+    | sed 's/"//g')
60 60
 
61 61
 # delorean-repo is excluded b/c we've already run --repo-setup on this node and
62 62
 # we don't want to overwrite that.
63 63
 sudo -E instack \
64
-  -e centos7 \
65
-     enable-packages-install \
66
-     install-types \
67
-     $ELEMENTS \
68
-  -k extra-data \
69
-     pre-install \
70
-     install \
71
-     post-install \
72
-  -b 05-fstab-rootfs-label \
73
-     00-fix-requiretty \
74
-     90-rebuild-ramdisk \
75
-     00-usr-local-bin-secure-path \
76
-  -x delorean-repo \
77
-  -d
64
+    -e centos7 \
65
+        enable-packages-install \
66
+        install-types \
67
+        $ELEMENTS \
68
+    -k extra-data \
69
+        pre-install \
70
+        install \
71
+        post-install \
72
+    -b 05-fstab-rootfs-label \
73
+        00-fix-requiretty \
74
+        90-rebuild-ramdisk \
75
+        00-usr-local-bin-secure-path \
76
+    -x delorean-repo \
77
+    -d
78 78
 
79 79
 # In the imported elements we have remove-machine-id.  In multinode
80 80
 # jobs that could mean we end up without /etc/machine-id.  Make sure
@@ -83,12 +83,12 @@ sudo -E instack \
83 83
 
84 84
 PACKAGES=$(\
85 85
 tripleo-build-images \
86
-  --image-json-output \
87
-  --image-name overcloud-full \
88
-  --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
89
-  --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
90
-  | jq '. | .[0].packages | .[] | tostring' \
91
-  | sed 's/"//g')
86
+    --image-json-output \
87
+    --image-name overcloud-full \
88
+    --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
89
+    --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
90
+    | jq '. | .[0].packages | .[] | tostring' \
91
+    | sed 's/"//g')
92 92
 
93 93
 # Install additional packages expected by the image
94 94
 sudo yum -y install $PACKAGES

+ 11
- 10
scripts/common_functions.sh View File

@@ -1,3 +1,4 @@
1
+#!/bin/bash
1 2
 # Tripleo CI functions
2 3
 
3 4
 # Revert a commit for tripleo ci
@@ -5,7 +6,7 @@
5 6
 # $2 : hash id of commit to revert
6 7
 # $3 : bug id of reason for revert (used to skip revert if found in commit
7 8
 #      that triggers ci).
8
-function temprevert(){
9
+function temprevert {
9 10
     # Before reverting check to ensure this isn't the related fix
10 11
     if git --git-dir=$TRIPLEO_ROOT/${ZUUL_PROJECT#*/}/.git log -1 | grep -iE "bug.*$3" ; then
11 12
         echo "Skipping temprevert because bug fix $3 was found in git message."
@@ -24,7 +25,7 @@ function temprevert(){
24 25
 # $2 : hash id of commit to pin too
25 26
 # $3 : bug id of reason for the pin (used to skip revert if found in commit
26 27
 #      that triggers ci).
27
-function pin(){
28
+function pin {
28 29
     # Before reverting check to ensure this isn't the related fix
29 30
     if git --git-dir=$TRIPLEO_ROOT/${ZUUL_PROJECT#*/}/.git log -1 | grep -iE "bug.*$3" ; then
30 31
         echo "Skipping pin because bug fix $3 was found in git message."
@@ -42,7 +43,7 @@ function pin(){
42 43
 # $2 : Gerrit refspec to cherry pick
43 44
 # $3 : bug id of reason for the cherry pick (used to skip cherry pick if found
44 45
 #      in commit that triggers ci).
45
-function cherrypick(){
46
+function cherrypick {
46 47
     local PROJ_NAME=$1
47 48
     local REFSPEC=$2
48 49
 
@@ -66,14 +67,14 @@ function cherrypick(){
66 67
 
67 68
 # echo's out a project name from a ref
68 69
 # $1 : e.g. openstack/nova:master:refs/changes/87/64787/3 returns nova
69
-function filterref(){
70
+function filterref {
70 71
     PROJ=${1%%:*}
71 72
     PROJ=${PROJ##*/}
72 73
     echo $PROJ
73 74
 }
74 75
 
75 76
 # Mount a qcow image, copy in the delorean repositories and update the packages
76
-function update_image(){
77
+function update_image {
77 78
     IMAGE=$1
78 79
     MOUNTDIR=$(mktemp -d)
79 80
     case ${IMAGE##*.} in
@@ -133,7 +134,7 @@ function update_image(){
133 134
 
134 135
 # Decide if a particular cached artifact can be used in this CI test
135 136
 # Takes a single argument representing the name of the artifact being checked.
136
-function canusecache(){
137
+function canusecache {
137 138
 
138 139
     # If we are uploading to the cache then we shouldn't use it
139 140
     [ "$CACHEUPLOAD" == 1 ] && return 1
@@ -165,7 +166,7 @@ function canusecache(){
165 166
     return 0
166 167
 }
167 168
 
168
-function extract_logs(){
169
+function extract_logs {
169 170
     local name=$1
170 171
     mkdir -p $WORKSPACE/logs/$name
171 172
     local logs_tar="$WORKSPACE/logs/$name.tar.xz"
@@ -178,7 +179,7 @@ function extract_logs(){
178 179
     fi
179 180
 }
180 181
 
181
-function postci(){
182
+function postci {
182 183
     local exit_val=${1:-0}
183 184
     set -x
184 185
     set +e
@@ -368,10 +369,10 @@ function echo_vars_to_deploy_env {
368 369
 }
369 370
 
370 371
 function stop_dstat {
371
-	ps axjf | grep bin/dstat | grep -v grep | awk '{print $2;}' | sudo xargs -t -n 1 -r kill
372
+    ps axjf | grep bin/dstat | grep -v grep | awk '{print $2;}' | sudo xargs -t -n 1 -r kill
372 373
 }
373 374
 
374
-function item_in_array () {
375
+function item_in_array {
375 376
     local item
376 377
     for item in "${@:2}"; do
377 378
         if [[ "$item" == "$1" ]]; then

+ 1
- 0
scripts/common_vars.bash View File

@@ -1,3 +1,4 @@
1
+#!/bin/bash
1 2
 # Periodic stable jobs set OVERRIDE_ZUUL_BRANCH, gate stable jobs
2 3
 # just have the branch they're proposed to, e.g ZUUL_BRANCH, in both
3 4
 # cases we need to set STABLE_RELEASE to match for tripleo.sh

+ 11
- 13
scripts/compare-reviews.py View File

@@ -4,11 +4,11 @@ from __future__ import print_function
4 4
 import argparse
5 5
 import difflib
6 6
 import json
7
-import requests
8 7
 import os
8
+import requests
9 9
 
10
-from colorama import init
11 10
 from colorama import Fore
11
+from colorama import init
12 12
 
13 13
 GERRIT_DETAIL_API = "https://review.openstack.org/changes/{}/detail"
14 14
 GERRIT_USER_NAME = "zuul"
@@ -16,8 +16,9 @@ ZUUL_PIPELINE = "check"
16 16
 
17 17
 
18 18
 def parse_ci_message(message):
19
-    """Convert zuul's gerrit message into a dict with job name as key and
20
-    job url as value
19
+    """Convert zuul's gerrit message into a dict
20
+
21
+    Dictionary contains job name as key and job url as value
21 22
     """
22 23
 
23 24
     jobs = {}
@@ -29,8 +30,7 @@ def parse_ci_message(message):
29 30
 
30 31
 
31 32
 def get_file(logs_url, file):
32
-    """Download a file from logs server for this job
33
-    """
33
+    """Download a file from logs server for this job"""
34 34
 
35 35
     response = requests.get(logs_url + '/logs/' + file)
36 36
     if response.ok:
@@ -39,8 +39,7 @@ def get_file(logs_url, file):
39 39
 
40 40
 
41 41
 def get_last_jobs(change):
42
-    """Get the last CI jobs execution at check pipeline for this review
43
-    """
42
+    """Get the last CI jobs execution at check pipeline for this review"""
44 43
 
45 44
     last_jobs = {}
46 45
     detail_url = GERRIT_DETAIL_API.format(change)
@@ -62,8 +61,9 @@ def get_last_jobs(change):
62 61
 
63 62
 
64 63
 def download(jobs, file_path):
65
-    """Download a file from all the specified jobs and return them as a
66
-    dictionary with job name as key and file content as value
64
+    """Download a file from all the specified jobs
65
+
66
+    Return them as a dictionary with job name as key and file content as value
67 67
     """
68 68
     downloaded_files = {}
69 69
     for job, logs in jobs.iteritems():
@@ -76,9 +76,7 @@ def download(jobs, file_path):
76 76
 
77 77
 
78 78
 def is_equal(lho_jobs, rho_jobs, file_path):
79
-    """Check the differences of file_path between the lho and rho job sets and
80
-    print out them
81
-    """
79
+    """Prints differences of file_path between the lho and rho job sets"""
82 80
 
83 81
     lho_files = download(lho_jobs, file_path)
84 82
     rho_files = download(rho_jobs, file_path)

+ 3
- 3
scripts/deploy-server.sh View File

@@ -11,9 +11,9 @@ echo puppetlabs-apache adrien-filemapper | xargs -n 1 puppet module install
11 11
 git clone https://github.com/puppetlabs/puppetlabs-vcsrepo.git /etc/puppet/modules/vcsrepo
12 12
 
13 13
 if [ -e /sys/class/net/eth1 ] ; then
14
-     echo -e 'DEVICE=eth1\nBOOTPROTO=dhcp\nONBOOT=yes\nPERSISTENT_DHCLIENT=yes\nPEERDNS=no' > /etc/sysconfig/network-scripts/ifcfg-eth1
15
-     ifdown eth1
16
-     ifup eth1
14
+    echo -e 'DEVICE=eth1\nBOOTPROTO=dhcp\nONBOOT=yes\nPERSISTENT_DHCLIENT=yes\nPEERDNS=no' > /etc/sysconfig/network-scripts/ifcfg-eth1
15
+    ifdown eth1
16
+    ifup eth1
17 17
 fi
18 18
 
19 19
 CIREPO=/opt/stack/tripleo-ci

+ 9
- 8
scripts/deploy.sh View File

@@ -1,3 +1,4 @@
1
+#!/bin/bash
1 2
 set -eux
2 3
 set -o pipefail
3 4
 
@@ -305,9 +306,9 @@ if [ "$OSINFRA" = "0" ]; then
305 306
     stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.register.nodes.seconds"
306 307
 
307 308
     if [ $INTROSPECT == 1 ] ; then
308
-       start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
309
-       $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --introspect-nodes
310
-       stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
309
+        start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
310
+        $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --introspect-nodes
311
+        stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
311 312
     fi
312 313
 
313 314
     if [ $PREDICTABLE_PLACEMENT == 1 ]; then
@@ -451,7 +452,7 @@ if [ "$MULTINODE" == 0 ] && [ "$OVERCLOUD" == 1 ] ; then
451 452
             echo "crm_resource for openstack-heat-engine has failed!"
452 453
             exit $exitcode
453 454
             }
454
-         stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.settle.seconds"
455
+        stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.settle.seconds"
455 456
     fi
456 457
 fi
457 458
 
@@ -464,10 +465,10 @@ if [ "$OVERCLOUD_MAJOR_UPGRADE" == 1 ] ; then
464 465
     # and thus the contents of delorean-ci may contain packages
465 466
     # we want to test for the current branch on upgrade
466 467
     if [ -s /etc/nodepool/sub_nodes_private ]; then
467
-      for ip in $(cat /etc/nodepool/sub_nodes_private); do
468
-        ssh $SSH_OPTIONS -tt -i /etc/nodepool/id_rsa $ip \
469
-          sudo sed -i -e \"s/enabled=0/enabled=1/\" /etc/yum.repos.d/delorean-ci.repo
470
-      done
468
+        for ip in $(cat /etc/nodepool/sub_nodes_private); do
469
+            ssh $SSH_OPTIONS -tt -i /etc/nodepool/id_rsa $ip \
470
+            sudo sed -i -e \"s/enabled=0/enabled=1/\" /etc/yum.repos.d/delorean-ci.repo
471
+        done
471 472
     fi
472 473
 
473 474
     source ~/stackrc

+ 6
- 6
scripts/generate-kill-heat.sh View File

@@ -6,12 +6,12 @@ set -o pipefail
6 6
 TMPFILE=$(mktemp)
7 7
 TMP2FILE=$(mktemp)
8 8
 
9
-function heat_resource_metadata() {
10
-  # Build os-collect-config command line arguments for the given heat
11
-  # resource, which when run, allow us to collect the heat completion
12
-  # signals.
13
-  heat resource-metadata overcloud $1 | jq '.["os-collect-config"]["cfn"]' | grep \" | tr -d '\n' | sed -e 's/"//g' -e 's/_/-/g' -e 's/: / /g' -e 's/,  / --cfn-/g' -e 's/^  /--cfn-/' -e 's/$/ --print/'
14
-  echo
9
+function heat_resource_metadata {
10
+    # Build os-collect-config command line arguments for the given heat
11
+    # resource, which when run, allow us to collect the heat completion
12
+    # signals.
13
+    heat resource-metadata overcloud $1 | jq '.["os-collect-config"]["cfn"]' | grep \" | tr -d '\n' | sed -e 's/"//g' -e 's/_/-/g' -e 's/: / /g' -e 's/,  / --cfn-/g' -e 's/^  /--cfn-/' -e 's/$/ --print/'
14
+    echo
15 15
 }
16 16
 
17 17
 >$TMPFILE

+ 38
- 38
scripts/getthelogs View File

@@ -1,36 +1,36 @@
1 1
 #!/bin/bash
2 2
 set -eu -o pipefail
3 3
 
4
-function usage(){
5
-  echo "Helper script for downloading tripleo-ci jobs logs"
6
-  echo
7
-  echo "Example:"
8
-  echo "getthelogs http://logs.openstack.org/00/123456/7/check/gate-tripleo-ci-foo/d3adbeef"
9
-  echo
10
-  echo "Downloads the logs and starts a shell from the logs root directory"
4
+function usage {
5
+    echo "Helper script for downloading tripleo-ci jobs logs"
6
+    echo
7
+    echo "Example:"
8
+    echo "getthelogs http://logs.openstack.org/00/123456/7/check/gate-tripleo-ci-foo/d3adbeef"
9
+    echo
10
+    echo "Downloads the logs and starts a shell from the logs root directory"
11 11
 }
12 12
 
13
-function finish(){
14
-  rc=${rc:-$?}
15
-  trap - EXIT
16
-  cd $TDIR/../
17
-  echo "Download job exited ${rc}"
18
-  PS1="JOBLOGS ]\$  " bash --noprofile --norc
13
+function finish {
14
+    rc=${rc:-$?}
15
+    trap - EXIT
16
+    cd $TDIR/../
17
+    echo "Download job exited ${rc}"
18
+    PS1="JOBLOGS ]\$  " bash --noprofile --norc
19 19
 }
20 20
 
21
-function get_dirs(){
22
-  local drop="\b(etc|ara|ara_oooq|docs|build|stackviz|sudoers.d|config-data|extra)\b"
23
-  local directories=""
24
-  directories=$(curl -s "$1" 2> /dev/null | grep -E "\[DIR" | grep -vE "${drop}" | sed -e "s,.*href=\"\([^\"]*\)\".*,${1}\1,g")
25
-  if [ -n "$directories" ]; then
26
-    for d in $directories; do
27
-      directories="$directories $(get_dirs $d/)"
28
-    done
29
-    echo $directories
30
-  else
31
-    echo ""
32
-  fi
33
-  return 0
21
+function get_dirs {
22
+    local drop="\b(etc|ara|ara_oooq|docs|build|stackviz|sudoers.d|config-data|extra)\b"
23
+    local directories=""
24
+    directories=$(curl -s "$1" 2> /dev/null | grep -E "\[DIR" | grep -vE "${drop}" | sed -e "s,.*href=\"\([^\"]*\)\".*,${1}\1,g")
25
+    if [ -n "$directories" ]; then
26
+        for d in $directories; do
27
+            directories="$directories $(get_dirs $d/)"
28
+        done
29
+        echo $directories
30
+    else
31
+        echo ""
32
+    fi
33
+    return 0
34 34
 }
35 35
 
36 36
 [[ "${1:--}" =~ ^\s+?- ]] && (usage; exit 1)
@@ -42,12 +42,12 @@ BASEURL=${1%/}
42 42
 SC=$(dirname $BASEURL | grep -o \/ | wc -w)
43 43
 if [[ $BASEURL =~ 'logs.rdoproject' && SC -le 9 ]] ||\
44 44
    [[ $BASEURL =~ 'logs.rdoproject.org/openstack-periodic' && SC -le 5 ]]; then
45
-  console="$BASEURL/console.txt.gz"
45
+    console="$BASEURL/console.txt.gz"
46 46
 elif [[ ! $(basename $BASEURL) == 'logs' && SC -le 7 ]]; then
47
-  console="$BASEURL/job-output.txt.gz"
48
-  BASEURL=${BASEURL}/logs
47
+    console="$BASEURL/job-output.txt.gz"
48
+    BASEURL=${BASEURL}/logs
49 49
 else
50
-  console=''
50
+    console=''
51 51
 fi
52 52
 TDIR=${BASEURL##*http://}
53 53
 TDIR=${TDIR##*https://}
@@ -59,18 +59,18 @@ echo "Target dir for download: $TDIR"
59 59
 echo Will download logs from the following URLs:
60 60
 list_to_get="$console $(get_dirs $BASEURL/)"
61 61
 for d in $list_to_get; do
62
-  echo $d
62
+    echo $d
63 63
 done
64 64
 
65 65
 rm -f wget-jobs.txt
66 66
 for d in $list_to_get; do
67
-  args="\"-nv -nc --no-use-server-timestamps \
68
-  --accept-regex='\.txt\.gz$|messages$' \
69
-  --reject='index.html*' \
70
-  --recursive -l 10 --domains logs.openstack.org,logs.rdoproject.org \
71
-  --no-parent \
72
-  -erobots=off --wait 0.25 ${d}\""
73
-  echo "${args}" >> wget-jobs.txt
67
+    args="\"-nv -nc --no-use-server-timestamps \
68
+    --accept-regex='\.txt\.gz$|messages$' \
69
+    --reject='index.html*' \
70
+    --recursive -l 10 --domains logs.openstack.org,logs.rdoproject.org \
71
+    --no-parent \
72
+    -erobots=off --wait 0.25 ${d}\""
73
+    echo "${args}" >> wget-jobs.txt
74 74
 done
75 75
 
76 76
 cat wget-jobs.txt | sed -n '{p;p}' | shuf > wget-jobs-shuf.txt

+ 17
- 8
scripts/metrics.bash View File

@@ -1,3 +1,4 @@
1
+#!/bin/bash
1 2
 export METRICS_START_TIMES=/tmp/metric-start-times
2 3
 export METRICS_DATA_FILE=/tmp/metrics-data
3 4
 
@@ -17,9 +18,12 @@ function record_metric {
17 18
 # called. NOTE: time metrics names must be unique.
18 19
 function start_metric {
19 20
     local NAME=$1
20
-    local START_TIME=$(date +%s)
21
+    local METRIC_NAME
22
+    local START_TIME
23
+    START_TIME=$(date +%s)
24
+
21 25
     # we use : as our delimiter so convert to _. Also convert spaces and /'s.
22
-    local METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
26
+    METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
23 27
 
24 28
     if grep -c "^$METRIC_NAME:" $METRICS_START_TIMES &>/dev/null; then
25 29
         echo "start_metric has already been called for $NAME" >&2
@@ -33,18 +37,23 @@ function start_metric {
33 37
 # The total time (in seconds) is calculated and logged to the metrics
34 38
 # data file. NOTE: the end time is used as the DTS.
35 39
 function stop_metric {
40
+    local END_TIME
41
+    local LINE
42
+    local METRIC_NAME
36 43
     local NAME=$1
37
-    local METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
38
-    local END_TIME=$(date +%s)
44
+    local START_TIME
45
+    local TOTAL_TIME
46
+
47
+    METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
48
+    END_TIME=$(date +%s)
39 49
     if ! grep -c "^$METRIC_NAME" $METRICS_START_TIMES &>/dev/null; then
40 50
         echo "Please call start_metric before calling stop_metric for $NAME" >&2
41 51
         exit 1
42 52
     fi
43
-    local LINE=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES)
44
-    local START_TIME=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES | cut -d ':' -f '2')
45
-    local TOTAL_TIME="$(($END_TIME - $START_TIME))"
53
+    LINE=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES)
54
+    START_TIME=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES | cut -d ':' -f '2')
55
+    TOTAL_TIME="$(($END_TIME - $START_TIME))"
46 56
     record_metric "$METRIC_NAME" "$TOTAL_TIME" "$END_TIME"
47
-
48 57
 }
49 58
 
50 59
 function metrics_to_graphite {

+ 22
- 22
scripts/mirror-server/mirror-images.sh View File

@@ -6,31 +6,31 @@ MIRRORURL="https://images.rdoproject.org/${RELEASE}/delorean/current-tripleo"
6 6
 IMAGES="overcloud-full.tar ironic-python-agent.tar"
7 7
 
8 8
 function check_new_image {
9
-  local img=$1
10
-  wget ${MIRRORURL}/${img}.md5 -O test_md5 -o /dev/null || {
11
-    echo "File ${MIRRORURL}/${img}.md5 doesn't present, can NOT continue"
12
-    exit 1
13
-    }
14
-  diff -q test_md5 ${img}.md5 >/dev/null
9
+    local img=$1
10
+    wget ${MIRRORURL}/${img}.md5 -O test_md5 -o /dev/null || {
11
+        echo "File ${MIRRORURL}/${img}.md5 doesn't present, can NOT continue"
12
+        exit 1
13
+        }
14
+    diff -q test_md5 ${img}.md5 >/dev/null
15 15
 }
16 16
 
17 17
 function update_images {
18
-  for img in $IMAGES; do
19
-    wget ${MIRRORURL}/${img} -O ${img}-${RELEASE}
20
-    wget ${MIRRORURL}/${img}.md5 -O ${img}-${RELEASE}.md5
21
-    down_md5="$(cat ${img}-${RELEASE}.md5 | awk {'print $1'})"
22
-    real_md5="$(md5sum ${img}-${RELEASE} | awk {'print $1'})"
23
-    if [[ "$down_md5" == "$real_md5" ]]; then
24
-        mv -f ${img}-${RELEASE} ${img}
25
-        mv -f ${img}-${RELEASE}.md5 ${img}.md5
26
-    else
27
-        echo "md5 doesn't match, image download was broken!"
28
-        echo "Calculated md5 is $real_md5 and downloaded is $down_md5"
29
-        rm -f "${img}-${RELEASE}"
30
-        rm -f "${img}-${RELEASE}.md5"
31
-    fi
32
-  done
33
-  wget ${MIRRORURL}/delorean_hash.txt -O delorean_hash.txt -o /dev/null
18
+    for img in $IMAGES; do
19
+        wget ${MIRRORURL}/${img} -O ${img}-${RELEASE}
20
+        wget ${MIRRORURL}/${img}.md5 -O ${img}-${RELEASE}.md5
21
+        down_md5="$(cat ${img}-${RELEASE}.md5 | awk {'print $1'})"
22
+        real_md5="$(md5sum ${img}-${RELEASE} | awk {'print $1'})"
23
+        if [[ "$down_md5" == "$real_md5" ]]; then
24
+            mv -f ${img}-${RELEASE} ${img}
25
+            mv -f ${img}-${RELEASE}.md5 ${img}.md5
26
+        else
27
+            echo "md5 doesn't match, image download was broken!"
28
+            echo "Calculated md5 is $real_md5 and downloaded is $down_md5"
29
+            rm -f "${img}-${RELEASE}"
30
+            rm -f "${img}-${RELEASE}.md5"
31
+        fi
32
+    done
33
+    wget ${MIRRORURL}/delorean_hash.txt -O delorean_hash.txt -o /dev/null
34 34
 }
35 35
 
36 36
 mkdir -p $BUILDS

+ 6
- 3
scripts/mirror-server/upload.cgi View File

@@ -1,16 +1,18 @@
1 1
 #!/bin/python3.4
2 2
 
3
+from builtins import FileExistsError
3 4
 import cgi
4
-import cgitb
5 5
 import fcntl
6 6
 import os
7 7
 import shutil
8 8
 import sys
9 9
 import tempfile
10 10
 
11
-basedir="/var/www/html/"
11
+basedir = "/var/www/html/"
12 12
 
13 13
 print("Content-Type: text/html\n")
14
+
15
+
14 16
 def saveform(form, storagedir):
15 17
     for key in form.keys():
16 18
         entry = form[key]
@@ -36,6 +38,7 @@ def saveform(form, storagedir):
36 38
                 fp.write(line)
37 39
             fp.close()
38 40
 
41
+
39 42
 def run():
40 43
 
41 44
     if not os.environ.get("REMOTE_ADDR", "").startswith("192.168."):
@@ -73,5 +76,5 @@ def run():
73 76
         fcntl.lockf(fd, fcntl.LOCK_UN)
74 77
         os.close(fd)
75 78
 
76
-sys.exit(run())
77 79
 
80
+sys.exit(run())

+ 1
- 1
scripts/oooq_common_functions.sh View File

@@ -65,7 +65,7 @@ function is_featureset {
65 65
     local type="${1}"
66 66
     local featureset_file="${2}"
67 67
 
68
-    [ $(shyaml get-value "${type}" "False"< "${featureset_file}") = "True" ]
68
+    [[ $(shyaml get-value "${type}" "False"< "${featureset_file}") = "True" ]]
69 69
 }
70 70
 
71 71
 function run_with_timeout {

+ 3
- 4
scripts/te-broker/destroy-env View File

@@ -21,7 +21,7 @@ set -x
21 21
 # NOTE(bnemec): This function starts the port deletions in the background.
22 22
 # To ensure they complete before you proceed, you must call "wait" after
23 23
 # calling this function.
24
-function delete_ports() {
24
+function delete_ports {
25 25
     local subnetid=${1:-}
26 26
     if [ -z "$subnetid" ]; then
27 27
         return
@@ -37,8 +37,7 @@ CONSOLE_LOG_PATH=/var/www/html/tebroker/console-logs/
37 37
 nova console-log bmc-${ENVNUM} | tail -n 100 | awk -v envnum="$ENVNUM" '$0=envnum ": " $0' >> /var/log/bmc-console-logs
38 38
 
39 39
 # Save all the consoles in the stack to a dedicated directory, stripping out ANSI color codes.
40
-for server in $(openstack server list -f value -c Name | grep baremetal-${ENVNUM}) bmc-$ENVNUM
41
-do
40
+for server in $(openstack server list -f value -c Name | grep baremetal-${ENVNUM}) bmc-$ENVNUM ; do
42 41
     openstack console log show $server | sed 's/\[[0-9;]*[a-zA-Z]//g' | gzip > $CONSOLE_LOG_PATH/$server-console.log.gz || true
43 42
 done
44 43
 
@@ -55,7 +54,7 @@ wait
55 54
 # If there was a keypair for this specific run, delete it.
56 55
 openstack keypair delete "tripleo-ci-key-$ENVNUM" || true
57 56
 
58
-function delete_stack() {
57
+function delete_stack {
59 58
     local stackname=$1
60 59
     # Nothing to do if the specified stack doesn't exist
61 60
     if ! heat stack-show $stackname; then

+ 36
- 22
scripts/te-broker/testenv-worker View File

@@ -23,8 +23,8 @@ import json
23 23
 import logging
24 24
 import logging.handlers
25 25
 import os
26
-import sys
27 26
 import subprocess
27
+import sys
28 28
 import tempfile
29 29
 import threading
30 30
 import time
@@ -35,9 +35,11 @@ from novaclient import client as novaclient
35 35
 from novaclient import exceptions
36 36
 
37 37
 # 100Mb log files
38
-maxBytes=1024*1024*100
38
+maxBytes = 1024*1024*100
39 39
 
40
-logging.basicConfig(filename="/var/www/html/tebroker/testenv-worker.log", format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
40
+logging.basicConfig(
41
+    filename="/var/www/html/tebroker/testenv-worker.log",
42
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
41 43
 
42 44
 
43 45
 class CallbackClient(gear.Client):
@@ -86,7 +88,7 @@ class TEWorkerThread(threading.Thread):
86 88
             self.runJob()
87 89
         except gear.InterruptedError:
88 90
             logger.info('getJob interrupted...')
89
-        except:
91
+        except Exception:
90 92
             logger.exception('Error while run_te_worker worker')
91 93
         self.running = False
92 94
 
@@ -132,17 +134,18 @@ class TEWorkerThread(threading.Thread):
132 134
             with tempfile.NamedTemporaryFile('r') as fp:
133 135
                 os.environ["TE_DATAFILE"] = fp.name
134 136
                 logger.info(
135
-                    subprocess.check_output([self.scriptfiles[0],
136
-                                             self.num,
137
-                                             arguments.get("envsize","2"),
138
-                                             arguments.get("ucinstance",""),
139
-                                             arguments.get("create_undercloud", ""),
140
-                                             arguments.get("ssh_key", ""),
141
-                                             arguments.get("net_iso", "multi-nic"),
142
-                                             arguments.get("compute_envsize","0"),
143
-                                             arguments.get("extra_nodes", "0"),
144
-                                             ],
145
-                                            stderr=subprocess.STDOUT))
137
+                    subprocess.check_output([
138
+                        self.scriptfiles[0],
139
+                        self.num,
140
+                        arguments.get("envsize", "2"),
141
+                        arguments.get("ucinstance", ""),
142
+                        arguments.get("create_undercloud", ""),
143
+                        arguments.get("ssh_key", ""),
144
+                        arguments.get("net_iso", "multi-nic"),
145
+                        arguments.get("compute_envsize", "0"),
146
+                        arguments.get("extra_nodes", "0"),
147
+                        ],
148
+                        stderr=subprocess.STDOUT))
146 149
                 clientdata = fp.read()
147 150
         except subprocess.CalledProcessError as e:
148 151
             logger.error(e.output)
@@ -164,7 +167,8 @@ class TEWorkerThread(threading.Thread):
164 167
             if not cb_job.running:
165 168
                 logger.error("No sign of the Callback job starting,"
166 169
                              "assuming its no longer present")
167
-                clientdata = subprocess.check_output([self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
170
+                clientdata = subprocess.check_output(
171
+                    [self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
168 172
                 logger.info(clientdata)
169 173
                 client.shutdown()
170 174
                 return
@@ -182,7 +186,8 @@ class TEWorkerThread(threading.Thread):
182 186
         else:
183 187
             logger.info('Returned from Job : %s', cb_job.data)
184 188
         try:
185
-            clientdata = subprocess.check_output([self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
189
+            clientdata = subprocess.check_output(
190
+                [self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
186 191
         except subprocess.CalledProcessError as e:
187 192
             logger.error(e.output)
188 193
             raise
@@ -238,7 +243,7 @@ def _check_instance_alive(nclient, instance, event):
238 243
     """
239 244
     if instance:
240 245
         try:
241
-            i = nclient.servers.get(instance)
246
+            nclient.servers.get(instance)
242 247
         except exceptions.NotFound:
243 248
             # There is a very brief period of time where instance could be set
244 249
             # and event not.  It's unlikely to happen, but let's be safe.
@@ -254,8 +259,10 @@ def main(args=sys.argv[1:]):
254 259
                     '"locked" state while it calls back to the client. The '
255 260
                     'clients job is provided with data (contents of datafile)'
256 261
     )
257
-    parser.add_argument('scriptfiles', nargs=2,
258
-                        help='Path to a script whos output is provided to the client')
262
+    parser.add_argument(
263
+        'scriptfiles',
264
+        nargs=2,
265
+        help='Path to a script whos output is provided to the client')
259 266
     parser.add_argument('--timeout', '-t', type=int, default=10800,
260 267
                         help='The maximum number of seconds to hold the '
261 268
                              'testenv for, can be overridden by the client.')
@@ -271,7 +278,10 @@ def main(args=sys.argv[1:]):
271 278
 
272 279
     global logger
273 280
     logger = logging.getLogger('testenv-worker-' + opts.tenum)
274
-    logger.addHandler(logging.handlers.RotatingFileHandler("/var/www/html/tebroker/testenv-worker.log", maxBytes=maxBytes, backupCount=5))
281
+    logger.addHandler(logging.handlers.RotatingFileHandler(
282
+        "/var/www/html/tebroker/testenv-worker.log",
283
+        maxBytes=maxBytes,
284
+        backupCount=5))
275 285
     logger.setLevel(logging.INFO)
276 286
     logger.removeHandler(logger.handlers[0])
277 287
 
@@ -279,7 +289,11 @@ def main(args=sys.argv[1:]):
279 289
         logger.setLevel(logging.DEBUG)
280 290
 
281 291
     logger.info('Starting test-env worker with data %r', opts.scriptfiles)
282
-    te_worker = TEWorkerThread(opts.geard, opts.tenum, opts.timeout, opts.scriptfiles)
292
+    te_worker = TEWorkerThread(
293
+        opts.geard,
294
+        opts.tenum,
295
+        opts.timeout,
296
+        opts.scriptfiles)
283 297
 
284 298
     te_worker.start()
285 299
 

+ 1
- 2
scripts/to_build View File

@@ -9,8 +9,7 @@ function set_env {
9 9
 # The updates job already takes a long time, always use cache for it
10 10
 [[ "$TOCI_JOBTYPE" =~ updates ]] && set_env "false"
11 11
 # There are some projects that require images building
12
-for PROJFULLREF in ${ZUUL_CHANGES//^/ };
13
-do
12
+for PROJFULLREF in ${ZUUL_CHANGES//^/ }; do
14 13
     PROJ=${PROJFULLREF%%:*};
15 14
     PROJ=${PROJ##*/};
16 15
     [[ "$PROJ" =~ diskimage-builder|tripleo-image-elements|tripleo-puppet-elements|instack-undercloud|python-tripleoclient|tripleo-common ]] && set_env "true"

+ 10
- 8
scripts/tripleo.sh View File

@@ -167,11 +167,11 @@ NODEPOOL_RDO_PROXY=${NODEPOOL_RDO_PROXY:-https://trunk.rdoproject.org}
167 167
 NODEPOOL_BUILDLOGS_CENTOS_PROXY="${NODEPOOL_BUILDLOGS_CENTOS_PROXY:-https://buildlogs.centos.org}"
168 168
 NODEPOOL_CBS_CENTOS_PROXY="${NODEPOOL_CBS_CENTOS_PROXY:-https://cbs.centos.org/repos}"
169 169
 OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF=${OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF}"\
170
-  $REPO_PREFIX/$CEPH_REPO_FILE"
170
+    $REPO_PREFIX/$CEPH_REPO_FILE"
171 171
 OPSTOOLS_REPO_ENABLED=${OPSTOOLS_REPO_ENABLED:-"0"}
172 172
 if [[ "${OPSTOOLS_REPO_ENABLED}" = 1 ]]; then
173
-  OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF=${OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF}"\
174
-    $REPO_PREFIX/centos-opstools.repo"
173
+    OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF=${OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF}"\
174
+        $REPO_PREFIX/centos-opstools.repo"
175 175
 fi
176 176
 FEATURE_BRANCH=${FEATURE_BRANCH:-}
177 177
 DELOREAN_SETUP=${DELOREAN_SETUP:-""}
@@ -250,7 +250,11 @@ function log {
250 250
 }
251 251
 
252 252
 function source_rc {
253
-    if [ $1 = "stackrc" ] ; then cloud="Undercloud"; else cloud="Overcloud"; fi
253
+    if [ $1 = "stackrc" ]; then
254
+        cloud="Undercloud"
255
+    else
256
+        cloud="Overcloud"
257
+    fi
254 258
     echo "You must source a $1 file for the $cloud."
255 259
     echo "Attempting to source $HOME/$1"
256 260
     source $HOME/$1
@@ -665,8 +669,7 @@ function overcloud_deploy {
665 669
     exitval=0
666 670
     log "Deploy command arguments: $OVERCLOUD_DEPLOY_ARGS"
667 671
     openstack overcloud deploy $OVERCLOUD_DEPLOY_ARGS || exitval=1
668
-    if [ $exitval -eq 1 ];
669
-    then
672
+    if [ $exitval -eq 1 ]; then
670 673
         log "Overcloud create - FAILED!"
671 674
         exit 1
672 675
     fi
@@ -713,8 +716,7 @@ function overcloud_update {
713 716
         log "Overcloud update started."
714 717
         exitval=0
715 718
         openstack overcloud deploy $OVERCLOUD_UPDATE_ARGS || exitval=1
716
-        if [ $exitval -eq 1 ];
717
-        then
719
+        if [ $exitval -eq 1 ]; then
718 720
             log "Overcloud update - FAILED!"
719 721
             exit 1
720 722
         fi

+ 1
- 1
scripts/website/README.md View File

@@ -30,7 +30,7 @@ and Launchpad API connections) and generate the CI reports which contact
30 30
 the API of the upstream Jenkins servers.
31 31
 
32 32
 If you want to do a quick build to test out new HTML formatting, etc. you
33
-can disable the reviewday and CI reports by running the following:  
33
+can disable the reviewday and CI reports by running the following:
34 34
 
35 35
   cd tripleo-ci/scripts/website
36 36
   SKIP\_REVIEWDAY="Y" SKIP\_CI\_REPORTS="Y" OUT\_HTML='out\_html' bash generate\_site.sh

+ 74
- 74
scripts/website/generate_site.sh View File

@@ -24,32 +24,32 @@ SKIP_BLOG=${SKIP_BLOG:-''}
24 24
 
25 25
 # TRIPLEO-DOCS
26 26
 if [ ! -d tripleo-docs ]; then
27
-  git clone git://git.openstack.org/openstack/tripleo-docs
28
-  pushd tripleo-docs
29
-  tox -edocs #initial run
30
-  popd
27
+    git clone git://git.openstack.org/openstack/tripleo-docs
28
+    pushd tripleo-docs
29
+    tox -edocs #initial run
30
+    popd
31 31
 else
32
-  pushd tripleo-docs
33
-  git reset --hard origin/master
34
-  git pull
35
-  # NOTE(bnemec): We need to rebuild this venv each time or changes to
36
-  # tripleosphinx won't be picked up.
37
-  tox -re docs
38
-  popd
32
+    pushd tripleo-docs
33
+    git reset --hard origin/master
34
+    git pull
35
+    # NOTE(bnemec): We need to rebuild this venv each time or changes to
36
+    # tripleosphinx won't be picked up.
37
+    tox -re docs
38
+    popd
39 39
 fi
40 40
 
41 41
 # TRIPLEO SPHINX
42 42
 if [ ! -d tripleosphinx ]; then
43
-  git clone https://github.com/dprince/tripleosphinx.git
44
-  pushd tripleosphinx
45
-  tox -edocs #creates the blank.html
46
-  popd
43
+    git clone https://github.com/dprince/tripleosphinx.git
44
+    pushd tripleosphinx
45
+    tox -edocs #creates the blank.html
46
+    popd
47 47
 else
48
-  pushd tripleosphinx
49
-  git reset --hard origin/master
50
-  git pull
51
-  tox -edocs #creates the blank.html
52
-  popd
48
+    pushd tripleosphinx
49
+    git reset --hard origin/master
50
+    git pull
51
+    tox -edocs #creates the blank.html
52
+    popd
53 53
 fi
54 54
 
55 55
 # swap in custom tripleosphinx
@@ -60,33 +60,33 @@ popd
60 60
 
61 61
 #REVIEWDAY
62 62
 if [ ! -d reviewday ]; then
63
-  git clone git://git.openstack.org/openstack-infra/reviewday
63
+    git clone git://git.openstack.org/openstack-infra/reviewday
64 64
 else
65
-  pushd reviewday
66
-  git reset --hard origin/master
67
-  git pull
68
-  popd
65
+    pushd reviewday
66
+    git reset --hard origin/master
67
+    git pull
68
+    popd
69 69
 fi
70 70
 
71 71
 #TRIPLEO CI
72 72
 if [ ! -d tripleo-ci ]; then
73
-  git clone git://git.openstack.org/openstack-infra/tripleo-ci
73
+    git clone git://git.openstack.org/openstack-infra/tripleo-ci
74 74
 else
75
-  pushd tripleo-ci
76
-  git reset --hard origin/master
77
-  git pull
78
-  popd
75
+    pushd tripleo-ci
76
+    git reset --hard origin/master
77
+    git pull
78
+    popd
79 79
 fi
80 80
 
81 81
 #Planet (Blog Feed Aggregator)
82 82
 PLANET_DIR='planet-venus'
83 83
 if [ ! -d '$PLANET_DIR' ]; then
84
-  git clone https://github.com/rubys/venus.git $PLANET_DIR
84
+    git clone https://github.com/rubys/venus.git $PLANET_DIR
85 85
 else
86
-  pushd $PLANET_DIR
87
-  git reset --hard origin/master
88
-  git pull
89
-  popd
86
+    pushd $PLANET_DIR
87
+    git reset --hard origin/master
88
+    git pull
89
+    popd
90 90
 fi
91 91
 
92 92
 #-----------------------------------------
@@ -104,54 +104,54 @@ $SUDO_CP mkdir -p $OUT_HTML
104 104
 
105 105
 # Reviewday
106 106
 if [ -z "$SKIP_REVIEWDAY" ]; then
107
-  pushd reviewday
108
-  tox -erun -- "-p $REVIEWDAY_INPUT_FILE"
109
-  $SUDO_CP cp -a arrow* out_report/*.png out_report/*.js out_report/*.css $OUT_HTML
110
-  DATA=$(cat out_report/data_table.html)
111
-  popd
112
-  OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/reviews.html
113
-  TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
114
-  sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
115
-  echo "<h1>TripleO Reviews</h1>" >> $OUT_FILE
116
-  sed -e "s|<title>.*|<title>TripleO: Reviews</title>|" -i $OUT_FILE # custom title
117
-  sed -e "s|<title>.*|<title>TripleO: Reviews</title><meta name='description' content='OpenStack Deployment Program Reviews'/>|" -i $OUT_FILE # custom title
118
-  echo "$DATA" >> $OUT_FILE
119
-  sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
107
+    pushd reviewday
108
+    tox -erun -- "-p $REVIEWDAY_INPUT_FILE"
109
+    $SUDO_CP cp -a arrow* out_report/*.png out_report/*.js out_report/*.css $OUT_HTML
110
+    DATA=$(cat out_report/data_table.html)
111
+    popd
112
+    OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/reviews.html
113
+    TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
114
+    sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
115
+    echo "<h1>TripleO Reviews</h1>" >> $OUT_FILE
116
+    sed -e "s|<title>.*|<title>TripleO: Reviews</title>|" -i $OUT_FILE # custom title
117
+    sed -e "s|<title>.*|<title>TripleO: Reviews</title><meta name='description' content='OpenStack Deployment Program Reviews'/>|" -i $OUT_FILE # custom title
118
+    echo "$DATA" >> $OUT_FILE
119
+    sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
120 120
 fi
121 121
 
122 122
 # TripleO CI
123 123
 if [ -z "$SKIP_CI_REPORTS" ]; then
124
-  pushd tripleo-ci
124
+    pushd tripleo-ci
125 125
 
126
-  # jobs report
127
-  tox -ecireport -- -b '^.*'
128
-  DATA=$(cat tripleo-jobs.html-table)
129
-  popd
130
-  OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/cistatus.html
131
-  TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
132
-  sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
133
-  echo "<h1>TripleO CI Status</h1>" >> $OUT_FILE
134
-  sed -e "s|<title>.*|<title>TripleO: CI Status</title><meta name='description' content='OpenStack Deployment Program CI Status results'/>|" -i $OUT_FILE # custom title
135
-  echo "$DATA" >> $OUT_FILE
136
-  sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
126
+    # jobs report
127
+    tox -ecireport -- -b '^.*'
128
+    DATA=$(cat tripleo-jobs.html-table)
129
+    popd
130
+    OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/cistatus.html
131
+    TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
132
+    sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
133
+    echo "<h1>TripleO CI Status</h1>" >> $OUT_FILE
134
+    sed -e "s|<title>.*|<title>TripleO: CI Status</title><meta name='description' content='OpenStack Deployment Program CI Status results'/>|" -i $OUT_FILE # custom title
135
+    echo "$DATA" >> $OUT_FILE
136
+    sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
137 137
 fi
138 138
 
139 139
 # Planet
140 140
 if [ -z "$SKIP_BLOG" ]; then
141
-  cp $SCRIPT_DIR/tripleo-ci/scripts/website/planet* $SCRIPT_DIR/$PLANET_DIR
142
-  pushd $SCRIPT_DIR/$PLANET_DIR
143
-  mkdir output
144
-  rm planet.html.tmplc # cleanup from previous runs
145
-  python planet.py planet.config.ini
146
-  popd
147
-  DATA=$(cat $PLANET_DIR/output/planet.html)
148
-  OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/planet.html
149
-  TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
150
-  sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
151
-  echo "<h1>Planet TripleO</h1>" >> $OUT_FILE
152
-  sed -e "s|<title>.*|<title>Planet TripleO</title><meta name='description' content='OpenStack Deployment Program Planet'/>|" -i $OUT_FILE # custom title
153
-  echo "$DATA" >> $OUT_FILE
154
-  sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
141
+    cp $SCRIPT_DIR/tripleo-ci/scripts/website/planet* $SCRIPT_DIR/$PLANET_DIR
142
+    pushd $SCRIPT_DIR/$PLANET_DIR
143
+    mkdir output
144
+    rm planet.html.tmplc # cleanup from previous runs
145
+    python planet.py planet.config.ini
146
+    popd
147
+    DATA=$(cat $PLANET_DIR/output/planet.html)
148
+    OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/planet.html
149
+    TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
150
+    sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
151
+    echo "<h1>Planet TripleO</h1>" >> $OUT_FILE
152
+    sed -e "s|<title>.*|<title>Planet TripleO</title><meta name='description' content='OpenStack Deployment Program Planet'/>|" -i $OUT_FILE # custom title
153
+    echo "$DATA" >> $OUT_FILE
154
+    sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
155 155
 fi
156 156
 
157 157
 # Copy in the new web pages

+ 2
- 1
test-environments/net-iso.yaml View File

@@ -1,5 +1,6 @@
1
+---
1 2
 parameter_defaults:
2 3
   ControlPlaneSubnetCidr: "24"
3 4
   ControlPlaneDefaultRoute: 192.168.24.1
4 5
   EC2MetadataIp: 192.168.24.1
5
-  DnsServers: ["8.8.8.8","8.8.4.4"]
6
+  DnsServers: ["8.8.8.8", "8.8.4.4"]

+ 5
- 5
test-environments/network-templates/nic-configs/ceph-storage.yaml View File

@@ -25,7 +25,7 @@ parameters:
25 25
     default: ''
26 26
     description: IP address/subnet on the tenant network
27 27
     type: string
28
-  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
28
+  ManagementIpSubnet:  # Only populated when including environments/network-management.yaml
29 29
     default: ''
30 30
     description: IP address/subnet on the management network
31 31
     type: string
@@ -62,18 +62,18 @@ parameters:
62 62
     default: '10.0.0.1'
63 63
     description: default route for the external network
64 64
     type: string
65
-  ControlPlaneSubnetCidr: # Override this via parameter_defaults
65
+  ControlPlaneSubnetCidr:  # Override this via parameter_defaults
66 66
     default: '24'
67 67
     description: The subnet CIDR of the control plane network.
68 68
     type: string
69
-  ControlPlaneDefaultRoute: # Override this via parameter_defaults
69
+  ControlPlaneDefaultRoute:  # Override this via parameter_defaults
70 70
     description: The default route of the control plane network.
71 71
     type: string
72
-  DnsServers: # Override this via parameter_defaults
72
+  DnsServers:  # Override this via parameter_defaults
73 73
     default: []
74 74
     description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
75 75
     type: comma_delimited_list
76
-  EC2MetadataIp: # Override this via parameter_defaults
76
+  EC2MetadataIp:  # Override this via parameter_defaults
77 77
     description: The IP address of the EC2 metadata server.
78 78
     type: string
79 79
 

+ 1
- 0
test-environments/scheduler-hints.yaml View File

@@ -1,3 +1,4 @@
1
+---
1 2
 parameter_defaults:
2 3
   ControllerSchedulerHints:
3 4
     'capabilities:node': 'controller-%index%'

+ 2
- 2
test-requirements.txt View File

@@ -1,8 +1,8 @@
1
-flake8
2 1
 pytest
3 2
 pytest-html
4 3
 pytest-cov
5 4
 mock
6 5
 requests
7 6
 pprint
8
-PyYAML
7
+pre-commit>=1.10 # MIT License
8
+PyYAML

+ 13
- 11
testenv-client View File

@@ -21,9 +21,9 @@
21 21
 import argparse
22 22
 import json
23 23
 import logging
24
-import sys
25
-import subprocess
26 24
 import os
25
+import subprocess
26
+import sys
27 27
 import tempfile
28 28
 import textwrap
29 29
 import threading
@@ -65,7 +65,8 @@ class TestCallback(object):
65 65
         if time_waiting > 90:
66 66
             logger.warn('%.1f seconds waiting for a worker.' % (time_waiting))
67 67
 
68
-        if "Couldn't retrieve env" in job.arguments or "Failed creating OVB stack" in job.arguments:
68
+        if "Couldn't retrieve env" in job.arguments or \
69
+                "Failed creating OVB stack" in job.arguments:
69 70
             logger.error(job.arguments)
70 71
             self.rv = 2
71 72
             job.sendWorkComplete("")
@@ -80,7 +81,7 @@ class TestCallback(object):
80 81
 
81 82
             try:
82 83
                 self.rv = subprocess.call(self.command)
83
-            except:
84
+            except Exception:
84 85
                 logger.exception("Error calling command")
85 86
                 self.rv = 2
86 87
 
@@ -195,14 +196,14 @@ def main(args=sys.argv[1:]):
195 196
     job_params = {
196 197
         "callback_name": callback_name,
197 198
         "timeout": opts.timeout,
198
-        "envsize":opts.envsize,
199
-        "compute_envsize":opts.compute_envsize,
200
-        "ucinstance":opts.ucinstance,
199
+        "envsize": opts.envsize,
200
+        "compute_envsize": opts.compute_envsize,
201
+        "ucinstance": opts.ucinstance,
201 202
         "create_undercloud": "true" if opts.create_undercloud else "",
202
-        "ssh_key":opts.ssh_key,
203
-        "net_iso":opts.net_iso,
204
-        "extra_nodes":opts.extra_nodes,
205
-        "job_identifier":job_identifier,
203
+        "ssh_key": opts.ssh_key,
204
+        "net_iso": opts.net_iso,
205
+        "extra_nodes": opts.extra_nodes,
206
+        "job_identifier": job_identifier,
206 207
     }
207 208
     job = gear.Job('lockenv', json.dumps(job_params))
208 209
     client.submitJob(job)
@@ -227,5 +228,6 @@ def main(args=sys.argv[1:]):
227 228
     logger.debug("Exiting with status : %d", cb.rv)
228 229
     return cb.rv
229 230
 
231
+
230 232
 if __name__ == '__main__':
231 233
     exit(main())

+ 1
- 0
toci-quickstart/config/collect-logs.yml View File

@@ -1,3 +1,4 @@
1
+---
1 2
 # Collect logs settings
2 3
 
3 4
 # artcl_tar_gz: true

+ 1
- 0
toci-quickstart/config/testenv/multinode-rdocloud.yml View File

@@ -1,3 +1,4 @@
1
+---
1 2
 # TRIPLEO-CI environment settings
2 3
 undercloud_user: "{{ lookup('env','USER') }}"
3 4
 non_root_user: "{{ undercloud_user }}"

+ 1
- 0
toci-quickstart/config/testenv/ovb.yml View File

@@ -1,3 +1,4 @@
1
+---
1 2
 undercloud_type: ovb
2 3
 use_testenv_broker: true
3 4
 build_test_packages: true

+ 10
- 10
toci_quickstart.sh View File

@@ -151,16 +151,16 @@ else
151 151
     for playbook in $PLAYBOOKS; do
152 152
         echo "${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}"
153 153
         run_with_timeout $START_JOB_TIME $QUICKSTART_INSTALL_CMD \
154
-           "${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}" \
155
-           $NODES_ARGS \
156
-           $FEATURESET_CONF \
157
-           $ENV_VARS \
158
-           $EXTRA_VARS \
159
-           $VXLAN_VARS \
160
-           $DEFAULT_ARGS \
161
-           --extra-vars ci_job_end_time=$(( START_JOB_TIME + REMAINING_TIME*60 )) \
162
-            $LOCAL_WORKING_DIR/playbooks/$playbook "${PLAYBOOKS_ARGS[$playbook]:-}" \
163
-            2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?
154
+            "${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}" \
155
+            $NODES_ARGS \
156
+            $FEATURESET_CONF \
157
+            $ENV_VARS \
158
+            $EXTRA_VARS \
159
+            $VXLAN_VARS \
160
+            $DEFAULT_ARGS \
161
+            --extra-vars ci_job_end_time=$(( START_JOB_TIME + REMAINING_TIME*60 )) \
162
+                $LOCAL_WORKING_DIR/playbooks/$playbook "${PLAYBOOKS_ARGS[$playbook]:-}" \
163
+                2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?
164 164
 
165 165
         # Print status of playbook run
166 166
         [[ "$exit_value" == 0 ]] && echo "Playbook run of $playbook passed successfully"

+ 2
- 1
tox.ini View File

@@ -18,7 +18,8 @@ commands = pyflakes setup.py scripts
18 18
 [testenv:linters]
19 19
 basepython = python3
20 20
 whitelist_externals = bash
21
-commands = flake8 --max-line-length 80 {toxinidir} {posargs}
21
+commands = python -m pre_commit run --source HEAD^ --origin HEAD
22
+
22 23
 
23 24
 # deprecated: use linters instead. kept only as a convenience alias
24 25
 [testenv:pep8]

+ 2
- 2
zuul.d/layout.yaml View File

@@ -55,8 +55,8 @@
55 55
     gate:
56 56
       queue: tripleo
57 57
       jobs:
58
-          # Don't put a files section on the linters job, otherwise no
59
-          # jobs might be defined and nothing can merge in this repo.
58
+        # Don't put a files section on the linters job, otherwise no
59
+        # jobs might be defined and nothing can merge in this repo.
60 60
         - openstack-tox-linters
61 61
         - openstack-tox-py27:
62 62
             files:

+ 1
- 0
zuul.d/nodesets.yaml View File

@@ -1,3 +1,4 @@
1
+---
1 2
 - nodeset:
2 3
     name: two-centos-7-nodes
3 4
     nodes:

Loading…
Cancel
Save