Adopt use of pre-commit linting

Follows the same configuration that was used on
tripleo-quickstart-extras, whose use is documented in tripleo-docs.

Change-Id: Iba8a2db92137f9f6ad28f498627eb1b87039d99f
Story: https://tree.taiga.io/project/tripleo-ci-board/task/381
Author: Sorin Sbarnea
Date:   2018-12-06 11:11:42 +00:00
Parent: 40b50f763c
Commit: ed27a979d5

35 changed files with 396 additions and 294 deletions

.ansible-lint (new file)

@@ -0,0 +1,18 @@
exclude_paths:
- roles/validate-ui/.travis.yml
parseable: true
rulesdir:
- ./ci-scripts/ansible_rules/
quiet: false
skip_list:
- ANSIBLE0006 # Using command rather than module we have a few use cases
# where we need to use curl and rsync
- ANSIBLE0007 # Using command rather than an argument to e.g file
# we have a lot of 'rm' command and we should use file module instead
- ANSIBLE0010 # Package installs should not use latest.
# Sometimes we need to update some packages.
- ANSIBLE0012 # Commands should not change things if nothing needs doing
- ANSIBLE0013 # Use Shell only when shell functionality is required
- ANSIBLE0016 # Tasks that run when changed should likely be handlers
# this requires refactoring roles, skipping for now
verbosity: 1
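
To try these rules locally, ansible-lint can be pointed at this configuration explicitly; a minimal sketch, assuming ansible-lint is installed and using a hypothetical playbook path:

    # -c selects this config file, -r adds the custom rules directory
    ansible-lint -c .ansible-lint -r ./ci-scripts/ansible_rules/ playbooks/site.yml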

.pre-commit-config.yaml (new file)

@@ -0,0 +1,42 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.0.0
hooks:
- id: trailing-whitespace
- id: mixed-line-ending
- id: check-byte-order-marker
- id: check-executables-have-shebangs
- id: check-merge-conflict
- id: debug-statements
- id: flake8
additional_dependencies:
- hacking<1.2.0,>=1.1.0
- id: check-yaml
files: .*\.(yaml|yml)$
# commented to allow progressive enablement in smaller patches
# - repo: https://github.com/adrienverge/yamllint.git
# rev: v1.13.0
# hooks:
# - id: yamllint
# files: \.(yaml|yml)$
# types: [file, yaml]
# entry: yamllint --strict -f parsable
- repo: https://github.com/ansible/ansible-lint
rev: v3.5.1
hooks:
- id: ansible-lint
files: \.(yaml|yml)$
entry: ansible-lint --force-color -v
- repo: https://github.com/openstack-dev/bashate.git
rev: 0.6.0
hooks:
- id: bashate
entry: bashate --error . --verbose --ignore=E006,E040
# Run bashate check for all bash scripts
# Ignores the following rules:
# E006: Line longer than 79 columns (as many scripts use jinja
# templating, this is very difficult)
# E040: Syntax error determined using `bash -n` (as many scripts
# use jinja templating, this will often fail and the syntax
# error will be discovered in execution anyway)
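
With this configuration in place the hooks can be exercised before pushing; a typical local sequence, assuming pre-commit is installed from PyPI:

    pip install --user pre-commit   # one-time install
    pre-commit install              # register the git hook in this clone
    pre-commit run --all-files      # run every configured hook once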

.yamllint (new file)

@@ -0,0 +1,6 @@
---
extends: default
rules:
line-length:
max: 180
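
The (currently commented-out) yamllint hook would apply this file; run standalone with the flags from the hook entry above, it would look like:

    # --strict turns warnings into errors, -f parsable eases CI consumption
    yamllint --strict -f parsable .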

(modified file)

@@ -151,16 +151,16 @@ done
for playbook in {{ " ".join(playbooks) }}; do
    echo ${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}
    $QUICKSTART_INSTALL_CMD \
        ${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG} \
        {{ nodes_args }} \
        {{ featureset_conf }} \
        {{ env_vars }} \
        {{ extra_vars }} \
        {{ vxlan_vars }} \
        $DEFAULT_ARGS \
        --extra-vars @{{ workspace }}/logs/zuul-variables.yaml \
        $LOCAL_WORKING_DIR/playbooks/$playbook ${PLAYBOOKS_ARGS[$playbook]:-} \
        2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?
    # Print status of playbook run
    [[ "$exit_value" == 0 ]] && echo "Playbook run of $playbook passed successfully"

(modified file)

@@ -27,7 +27,7 @@ USER=centos
# makes some assumptions but good enough for now
nova keypair-add --pub-key ~/.ssh/id_rsa.pub bighammer || true

-function tapper(){
+function tapper {
    set -x
    NODENAME=test-node-$1
@@ -35,8 +35,8 @@ function tapper(){
    #trap "nova delete $NODENAME" RETURN ERR
    sleep 60
    if [ "$(nova show $NODENAME | awk '/status/ {print $4}')" != "ACTIVE" ] ; then
        nova show $NODENAME
        return 1
    fi
    IP=$(nova show $NODENAME | awk '/private network/ {print $5}')

(modified file)

@@ -51,30 +51,30 @@ except:
export ELEMENTS_PATH="${COMMON_ELEMENTS_PATH}:/usr/share/instack-undercloud:/usr/share/tripleo-image-elements:/usr/share/tripleo-puppet-elements"

ELEMENTS=$(\
    tripleo-build-images \
        --image-json-output \
        --image-name overcloud-full \
        --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
        --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
    | jq '. | .[0].elements | map(.+" ") | add' \
    | sed 's/"//g')

# delorean-repo is excluded b/c we've already run --repo-setup on this node and
# we don't want to overwrite that.
sudo -E instack \
    -e centos7 \
       enable-packages-install \
       install-types \
       $ELEMENTS \
    -k extra-data \
       pre-install \
       install \
       post-install \
    -b 05-fstab-rootfs-label \
       00-fix-requiretty \
       90-rebuild-ramdisk \
       00-usr-local-bin-secure-path \
    -x delorean-repo \
    -d

# In the imported elements we have remove-machine-id. In multinode
# jobs that could mean we end up without /etc/machine-id. Make sure
@@ -83,12 +83,12 @@ sudo -E instack \
PACKAGES=$(\
    tripleo-build-images \
        --image-json-output \
        --image-name overcloud-full \
        --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
        --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
    | jq '. | .[0].packages | .[] | tostring' \
    | sed 's/"//g')

# Install additional packages expected by the image
sudo yum -y install $PACKAGES

(modified file)

@@ -1,3 +1,4 @@
+#!/bin/bash
# Tripleo CI functions

# Revert a commit for tripleo ci
@@ -5,7 +6,7 @@
# $2 : hash id of commit to revert
# $3 : bug id of reason for revert (used to skip revert if found in commit
#      that triggers ci).
-function temprevert(){
+function temprevert {
    # Before reverting check to ensure this isn't the related fix
    if git --git-dir=$TRIPLEO_ROOT/${ZUUL_PROJECT#*/}/.git log -1 | grep -iE "bug.*$3" ; then
        echo "Skipping temprevert because bug fix $3 was found in git message."
@@ -24,7 +25,7 @@ function temprevert(){
# $2 : hash id of commit to pin too
# $3 : bug id of reason for the pin (used to skip revert if found in commit
#      that triggers ci).
-function pin(){
+function pin {
    # Before reverting check to ensure this isn't the related fix
    if git --git-dir=$TRIPLEO_ROOT/${ZUUL_PROJECT#*/}/.git log -1 | grep -iE "bug.*$3" ; then
        echo "Skipping pin because bug fix $3 was found in git message."
@@ -42,7 +43,7 @@ function pin(){
# $2 : Gerrit refspec to cherry pick
# $3 : bug id of reason for the cherry pick (used to skip cherry pick if found
#      in commit that triggers ci).
-function cherrypick(){
+function cherrypick {
    local PROJ_NAME=$1
    local REFSPEC=$2
@@ -66,14 +67,14 @@ function cherrypick(){
# echo's out a project name from a ref
# $1 : e.g. openstack/nova:master:refs/changes/87/64787/3 returns nova
-function filterref(){
+function filterref {
    PROJ=${1%%:*}
    PROJ=${PROJ##*/}
    echo $PROJ
}

# Mount a qcow image, copy in the delorean repositories and update the packages
-function update_image(){
+function update_image {
    IMAGE=$1
    MOUNTDIR=$(mktemp -d)
    case ${IMAGE##*.} in
@@ -133,7 +134,7 @@ function update_image(){
# Decide if a particular cached artifact can be used in this CI test
# Takes a single argument representing the name of the artifact being checked.
-function canusecache(){
+function canusecache {
    # If we are uploading to the cache then we shouldn't use it
    [ "$CACHEUPLOAD" == 1 ] && return 1
@@ -165,7 +166,7 @@ function canusecache(){
    return 0
}

-function extract_logs(){
+function extract_logs {
    local name=$1
    mkdir -p $WORKSPACE/logs/$name
    local logs_tar="$WORKSPACE/logs/$name.tar.xz"
@@ -178,7 +179,7 @@ function extract_logs(){
    fi
}

-function postci(){
+function postci {
    local exit_val=${1:-0}
    set -x
    set +e
@@ -368,10 +369,10 @@ function echo_vars_to_deploy_env {
}

function stop_dstat {
    ps axjf | grep bin/dstat | grep -v grep | awk '{print $2;}' | sudo xargs -t -n 1 -r kill
}

-function item_in_array () {
+function item_in_array {
    local item
    for item in "${@:2}"; do
        if [[ "$item" == "$1" ]]; then

(modified file)

@@ -1,3 +1,4 @@
+#!/bin/bash
# Periodic stable jobs set OVERRIDE_ZUUL_BRANCH, gate stable jobs
# just have the branch they're proposed to, e.g ZUUL_BRANCH, in both
# cases we need to set STABLE_RELEASE to match for tripleo.sh

(modified file)

@@ -4,11 +4,11 @@ from __future__ import print_function
import argparse
import difflib
import json
-import requests
import os
+import requests

-from colorama import init
from colorama import Fore
+from colorama import init

GERRIT_DETAIL_API = "https://review.openstack.org/changes/{}/detail"
GERRIT_USER_NAME = "zuul"
@@ -16,8 +16,9 @@ ZUUL_PIPELINE = "check"

def parse_ci_message(message):
-    """Convert zuul's gerrit message into a dict with job name as key and
-    job url as value
+    """Convert zuul's gerrit message into a dict
+
+    Dictionary contains job name as key and job url as value
    """
    jobs = {}
@@ -29,8 +30,7 @@ def parse_ci_message(message):

def get_file(logs_url, file):
-    """Download a file from logs server for this job
-    """
+    """Download a file from logs server for this job"""
    response = requests.get(logs_url + '/logs/' + file)
    if response.ok:
@@ -39,8 +39,7 @@ def get_file(logs_url, file):

def get_last_jobs(change):
-    """Get the last CI jobs execution at check pipeline for this review
-    """
+    """Get the last CI jobs execution at check pipeline for this review"""
    last_jobs = {}
    detail_url = GERRIT_DETAIL_API.format(change)
@@ -62,8 +61,9 @@ def get_last_jobs(change):

def download(jobs, file_path):
-    """Download a file from all the specified jobs and return them as a
-    dictionary with job name as key and file content as value
+    """Download a file from all the specified jobs
+
+    Return them as a dictionary with job name as key and file content as value
    """
    downloaded_files = {}
    for job, logs in jobs.iteritems():
@@ -76,9 +76,7 @@ def download(jobs, file_path):

def is_equal(lho_jobs, rho_jobs, file_path):
-    """Check the differences of file_path between the lho and rho job sets and
-    print out them
-    """
+    """Prints differences of file_path between the lho and rho job sets"""
    lho_files = download(lho_jobs, file_path)
    rho_files = download(rho_jobs, file_path)

(modified file)

@@ -11,9 +11,9 @@ echo puppetlabs-apache adrien-filemapper | xargs -n 1 puppet module install
git clone https://github.com/puppetlabs/puppetlabs-vcsrepo.git /etc/puppet/modules/vcsrepo

if [ -e /sys/class/net/eth1 ] ; then
    echo -e 'DEVICE=eth1\nBOOTPROTO=dhcp\nONBOOT=yes\nPERSISTENT_DHCLIENT=yes\nPEERDNS=no' > /etc/sysconfig/network-scripts/ifcfg-eth1
    ifdown eth1
    ifup eth1
fi

CIREPO=/opt/stack/tripleo-ci

(modified file)

@@ -1,3 +1,4 @@
+#!/bin/bash
set -eux
set -o pipefail
@@ -305,9 +306,9 @@ if [ "$OSINFRA" = "0" ]; then
    stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.register.nodes.seconds"
    if [ $INTROSPECT == 1 ] ; then
        start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
        $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --introspect-nodes
        stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
    fi
    if [ $PREDICTABLE_PLACEMENT == 1 ]; then
@@ -451,7 +452,7 @@ if [ "$MULTINODE" == 0 ] && [ "$OVERCLOUD" == 1 ] ; then
            echo "crm_resource for openstack-heat-engine has failed!"
            exit $exitcode
        }
        stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.settle.seconds"
    fi
fi
@@ -464,10 +465,10 @@ if [ "$OVERCLOUD_MAJOR_UPGRADE" == 1 ] ; then
    # and thus the contents of delorean-ci may contain packages
    # we want to test for the current branch on upgrade
    if [ -s /etc/nodepool/sub_nodes_private ]; then
        for ip in $(cat /etc/nodepool/sub_nodes_private); do
            ssh $SSH_OPTIONS -tt -i /etc/nodepool/id_rsa $ip \
                sudo sed -i -e \"s/enabled=0/enabled=1/\" /etc/yum.repos.d/delorean-ci.repo
        done
    fi
    source ~/stackrc

(modified file)

@@ -6,12 +6,12 @@ set -o pipefail
TMPFILE=$(mktemp)
TMP2FILE=$(mktemp)

-function heat_resource_metadata() {
+function heat_resource_metadata {
    # Build os-collect-config command line arguments for the given heat
    # resource, which when run, allow us to collect the heat completion
    # signals.
    heat resource-metadata overcloud $1 | jq '.["os-collect-config"]["cfn"]' | grep \" | tr -d '\n' | sed -e 's/"//g' -e 's/_/-/g' -e 's/: / /g' -e 's/, / --cfn-/g' -e 's/^ /--cfn-/' -e 's/$/ --print/'
    echo
}

>$TMPFILE

(modified file)

@@ -1,36 +1,36 @@
#!/bin/bash
set -eu -o pipefail

-function usage(){
+function usage {
    echo "Helper script for downloading tripleo-ci jobs logs"
    echo
    echo "Example:"
    echo "getthelogs http://logs.openstack.org/00/123456/7/check/gate-tripleo-ci-foo/d3adbeef"
    echo
    echo "Downloads the logs and starts a shell from the logs root directory"
}

-function finish(){
+function finish {
    rc=${rc:-$?}
    trap - EXIT
    cd $TDIR/../
    echo "Download job exited ${rc}"
    PS1="JOBLOGS ]\$ " bash --noprofile --norc
}

-function get_dirs(){
+function get_dirs {
    local drop="\b(etc|ara|ara_oooq|docs|build|stackviz|sudoers.d|config-data|extra)\b"
    local directories=""
    directories=$(curl -s "$1" 2> /dev/null | grep -E "\[DIR" | grep -vE "${drop}" | sed -e "s,.*href=\"\([^\"]*\)\".*,${1}\1,g")
    if [ -n "$directories" ]; then
        for d in $directories; do
            directories="$directories $(get_dirs $d/)"
        done
        echo $directories
    else
        echo ""
    fi
    return 0
}

[[ "${1:--}" =~ ^\s+?- ]] && (usage; exit 1)
@@ -42,12 +42,12 @@ BASEURL=${1%/}
SC=$(dirname $BASEURL | grep -o \/ | wc -w)
if [[ $BASEURL =~ 'logs.rdoproject' && SC -le 9 ]] ||\
   [[ $BASEURL =~ 'logs.rdoproject.org/openstack-periodic' && SC -le 5 ]]; then
    console="$BASEURL/console.txt.gz"
elif [[ ! $(basename $BASEURL) == 'logs' && SC -le 7 ]]; then
    console="$BASEURL/job-output.txt.gz"
    BASEURL=${BASEURL}/logs
else
    console=''
fi
TDIR=${BASEURL##*http://}
TDIR=${TDIR##*https://}
@@ -59,18 +59,18 @@ echo "Target dir for download: $TDIR"
echo Will download logs from the following URLs:
list_to_get="$console $(get_dirs $BASEURL/)"
for d in $list_to_get; do
    echo $d
done

rm -f wget-jobs.txt
for d in $list_to_get; do
    args="\"-nv -nc --no-use-server-timestamps \
        --accept-regex='\.txt\.gz$|messages$' \
        --reject='index.html*' \
        --recursive -l 10 --domains logs.openstack.org,logs.rdoproject.org \
        --no-parent \
        -erobots=off --wait 0.25 ${d}\""
    echo "${args}" >> wget-jobs.txt
done

cat wget-jobs.txt | sed -n '{p;p}' | shuf > wget-jobs-shuf.txt

(modified file)

@@ -1,3 +1,4 @@
+#!/bin/bash
export METRICS_START_TIMES=/tmp/metric-start-times
export METRICS_DATA_FILE=/tmp/metrics-data
@@ -17,9 +18,12 @@ function record_metric {
# called. NOTE: time metrics names must be unique.
function start_metric {
    local NAME=$1
-    local START_TIME=$(date +%s)
+    local METRIC_NAME
+    local START_TIME
+    START_TIME=$(date +%s)
    # we use : as our delimiter so convert to _. Also convert spaces and /'s.
-    local METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
+    METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
    if grep -c "^$METRIC_NAME:" $METRICS_START_TIMES &>/dev/null; then
        echo "start_metric has already been called for $NAME" >&2
@@ -33,18 +37,23 @@ function start_metric {
# The total time (in seconds) is calculated and logged to the metrics
# data file. NOTE: the end time is used as the DTS.
function stop_metric {
+    local END_TIME
+    local LINE
+    local METRIC_NAME
    local NAME=$1
-    local METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
-    local END_TIME=$(date +%s)
+    local START_TIME
+    local TOTAL_TIME
+    METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
+    END_TIME=$(date +%s)
    if ! grep -c "^$METRIC_NAME" $METRICS_START_TIMES &>/dev/null; then
        echo "Please call start_metric before calling stop_metric for $NAME" >&2
        exit 1
    fi
-    local LINE=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES)
-    local START_TIME=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES | cut -d ':' -f '2')
-    local TOTAL_TIME="$(($END_TIME - $START_TIME))"
+    LINE=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES)
+    START_TIME=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES | cut -d ':' -f '2')
+    TOTAL_TIME="$(($END_TIME - $START_TIME))"
    record_metric "$METRIC_NAME" "$TOTAL_TIME" "$END_TIME"
}

function metrics_to_graphite {

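start_metric and stop_metric are intended to bracket a timed step, as the introspection hunk earlier in this commit shows; a minimal sketch with a hypothetical metric name and step:

    start_metric "tripleo.master.ovb.overcloud.deploy.seconds"
    run_deploy_step   # hypothetical long-running step being timed
    stop_metric "tripleo.master.ovb.overcloud.deploy.seconds"
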
(modified file)

@@ -6,31 +6,31 @@ MIRRORURL="https://images.rdoproject.org/${RELEASE}/delorean/current-tripleo"
IMAGES="overcloud-full.tar ironic-python-agent.tar"

function check_new_image {
    local img=$1
    wget ${MIRRORURL}/${img}.md5 -O test_md5 -o /dev/null || {
        echo "File ${MIRRORURL}/${img}.md5 doesn't present, can NOT continue"
        exit 1
    }
    diff -q test_md5 ${img}.md5 >/dev/null
}

function update_images {
    for img in $IMAGES; do
        wget ${MIRRORURL}/${img} -O ${img}-${RELEASE}
        wget ${MIRRORURL}/${img}.md5 -O ${img}-${RELEASE}.md5
        down_md5="$(cat ${img}-${RELEASE}.md5 | awk {'print $1'})"
        real_md5="$(md5sum ${img}-${RELEASE} | awk {'print $1'})"
        if [[ "$down_md5" == "$real_md5" ]]; then
            mv -f ${img}-${RELEASE} ${img}
            mv -f ${img}-${RELEASE}.md5 ${img}.md5
        else
            echo "md5 doesn't match, image download was broken!"
            echo "Calculated md5 is $real_md5 and downloaded is $down_md5"
            rm -f "${img}-${RELEASE}"
            rm -f "${img}-${RELEASE}.md5"
        fi
    done
    wget ${MIRRORURL}/delorean_hash.txt -O delorean_hash.txt -o /dev/null
}

mkdir -p $BUILDS

(modified file)

@@ -1,16 +1,18 @@
#!/bin/python3.4
+from builtins import FileExistsError
import cgi
-import cgitb
import fcntl
import os
import shutil
import sys
import tempfile

-basedir="/var/www/html/"
+basedir = "/var/www/html/"

print("Content-Type: text/html\n")

def saveform(form, storagedir):
    for key in form.keys():
        entry = form[key]
@@ -36,6 +38,7 @@ def saveform(form, storagedir):
            fp.write(line)
        fp.close()

def run():
    if not os.environ.get("REMOTE_ADDR", "").startswith("192.168."):
@@ -73,5 +76,5 @@ def run():
        fcntl.lockf(fd, fcntl.LOCK_UN)
        os.close(fd)

sys.exit(run())

(modified file)

@@ -65,7 +65,7 @@ function is_featureset {
    local type="${1}"
    local featureset_file="${2}"

-    [ $(shyaml get-value "${type}" "False"< "${featureset_file}") = "True" ]
+    [[ $(shyaml get-value "${type}" "False"< "${featureset_file}") = "True" ]]
}

function run_with_timeout {

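is_featureset succeeds only when the given key is True in the featureset file, so it composes directly with if; a sketch with a hypothetical featureset path and key:

    if is_featureset overcloud_upgrade config/general_config/featureset037.yml; then
        echo "this job runs an upgrade featureset"
    fi
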
(modified file)

@@ -21,7 +21,7 @@ set -x
# NOTE(bnemec): This function starts the port deletions in the background.
# To ensure they complete before you proceed, you must call "wait" after
# calling this function.
-function delete_ports() {
+function delete_ports {
    local subnetid=${1:-}
    if [ -z "$subnetid" ]; then
        return
@@ -37,8 +37,7 @@ CONSOLE_LOG_PATH=/var/www/html/tebroker/console-logs/
nova console-log bmc-${ENVNUM} | tail -n 100 | awk -v envnum="$ENVNUM" '$0=envnum ": " $0' >> /var/log/bmc-console-logs

# Save all the consoles in the stack to a dedicated directory, stripping out ANSI color codes.
-for server in $(openstack server list -f value -c Name | grep baremetal-${ENVNUM}) bmc-$ENVNUM
-do
+for server in $(openstack server list -f value -c Name | grep baremetal-${ENVNUM}) bmc-$ENVNUM ; do
    openstack console log show $server | sed 's/\[[0-9;]*[a-zA-Z]//g' | gzip > $CONSOLE_LOG_PATH/$server-console.log.gz || true
done
@@ -55,7 +54,7 @@ wait
# If there was a keypair for this specific run, delete it.
openstack keypair delete "tripleo-ci-key-$ENVNUM" || true

-function delete_stack() {
+function delete_stack {
    local stackname=$1
    # Nothing to do if the specified stack doesn't exist
    if ! heat stack-show $stackname; then

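Since delete_ports (above) only starts the deletions in the background, a caller must wait before tearing the subnet down; a hypothetical call site honoring that NOTE:

    delete_ports "$SUBNETID"   # returns immediately, deletions still running
    wait                       # block until all background deletes finish
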
(modified file)

@@ -23,8 +23,8 @@ import json
import logging
import logging.handlers
import os
-import sys
import subprocess
+import sys
import tempfile
import threading
import time
@@ -35,9 +35,11 @@ from novaclient import client as novaclient
from novaclient import exceptions

# 100Mb log files
-maxBytes=1024*1024*100
+maxBytes = 1024*1024*100

-logging.basicConfig(filename="/var/www/html/tebroker/testenv-worker.log", format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+logging.basicConfig(
+    filename="/var/www/html/tebroker/testenv-worker.log",
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

class CallbackClient(gear.Client):
@@ -86,7 +88,7 @@ class TEWorkerThread(threading.Thread):
            self.runJob()
        except gear.InterruptedError:
            logger.info('getJob interrupted...')
-        except:
+        except Exception:
            logger.exception('Error while run_te_worker worker')
        self.running = False
@@ -132,17 +134,18 @@
            with tempfile.NamedTemporaryFile('r') as fp:
                os.environ["TE_DATAFILE"] = fp.name
                logger.info(
-                    subprocess.check_output([self.scriptfiles[0],
-                                             self.num,
-                                             arguments.get("envsize","2"),
-                                             arguments.get("ucinstance",""),
-                                             arguments.get("create_undercloud", ""),
-                                             arguments.get("ssh_key", ""),
-                                             arguments.get("net_iso", "multi-nic"),
-                                             arguments.get("compute_envsize","0"),
-                                             arguments.get("extra_nodes", "0"),
-                                             ],
-                                            stderr=subprocess.STDOUT))
+                    subprocess.check_output([
+                        self.scriptfiles[0],
+                        self.num,
+                        arguments.get("envsize", "2"),
+                        arguments.get("ucinstance", ""),
+                        arguments.get("create_undercloud", ""),
+                        arguments.get("ssh_key", ""),
+                        arguments.get("net_iso", "multi-nic"),
+                        arguments.get("compute_envsize", "0"),
+                        arguments.get("extra_nodes", "0"),
+                    ],
+                        stderr=subprocess.STDOUT))
                clientdata = fp.read()
        except subprocess.CalledProcessError as e:
            logger.error(e.output)
@@ -164,7 +167,8 @@ class TEWorkerThread(threading.Thread):
            if not cb_job.running:
                logger.error("No sign of the Callback job starting,"
                             "assuming its no longer present")
-                clientdata = subprocess.check_output([self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
+                clientdata = subprocess.check_output(
+                    [self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
                logger.info(clientdata)
                client.shutdown()
                return
@@ -182,7 +186,8 @@
            else:
                logger.info('Returned from Job : %s', cb_job.data)
        try:
-            clientdata = subprocess.check_output([self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
+            clientdata = subprocess.check_output(
+                [self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            logger.error(e.output)
            raise
@@ -238,7 +243,7 @@ def _check_instance_alive(nclient, instance, event):
    """
    if instance:
        try:
-            i = nclient.servers.get(instance)
+            nclient.servers.get(instance)
        except exceptions.NotFound:
            # There is a very brief period of time where instance could be set
            # and event not. It's unlikely to happen, but let's be safe.
@@ -254,8 +259,10 @@ def main(args=sys.argv[1:]):
        '"locked" state while it calls back to the client. The '
        'clients job is provided with data (contents of datafile)'
    )
-    parser.add_argument('scriptfiles', nargs=2,
-                        help='Path to a script whos output is provided to the client')
+    parser.add_argument(
+        'scriptfiles',
+        nargs=2,
+        help='Path to a script whos output is provided to the client')
    parser.add_argument('--timeout', '-t', type=int, default=10800,
                        help='The maximum number of seconds to hold the '
                             'testenv for, can be overridden by the client.')
@@ -271,7 +278,10 @@
    global logger
    logger = logging.getLogger('testenv-worker-' + opts.tenum)
-    logger.addHandler(logging.handlers.RotatingFileHandler("/var/www/html/tebroker/testenv-worker.log", maxBytes=maxBytes, backupCount=5))
+    logger.addHandler(logging.handlers.RotatingFileHandler(
+        "/var/www/html/tebroker/testenv-worker.log",
+        maxBytes=maxBytes,
+        backupCount=5))
    logger.setLevel(logging.INFO)
    logger.removeHandler(logger.handlers[0])
@@ -279,7 +289,11 @@
        logger.setLevel(logging.DEBUG)

    logger.info('Starting test-env worker with data %r', opts.scriptfiles)
-    te_worker = TEWorkerThread(opts.geard, opts.tenum, opts.timeout, opts.scriptfiles)
+    te_worker = TEWorkerThread(
+        opts.geard,
+        opts.tenum,
+        opts.timeout,
+        opts.scriptfiles)
    te_worker.start()

(modified file)

@@ -9,8 +9,7 @@ function set_env {
# The updates job already takes a long time, always use cache for it
[[ "$TOCI_JOBTYPE" =~ updates ]] && set_env "false"

# There are some projects that require images building
-for PROJFULLREF in ${ZUUL_CHANGES//^/ };
-do
+for PROJFULLREF in ${ZUUL_CHANGES//^/ }; do
    PROJ=${PROJFULLREF%%:*};
    PROJ=${PROJ##*/};
    [[ "$PROJ" =~ diskimage-builder|tripleo-image-elements|tripleo-puppet-elements|instack-undercloud|python-tripleoclient|tripleo-common ]] && set_env "true"

(modified file)

@@ -167,11 +167,11 @@ NODEPOOL_RDO_PROXY=${NODEPOOL_RDO_PROXY:-https://trunk.rdoproject.org}
NODEPOOL_BUILDLOGS_CENTOS_PROXY="${NODEPOOL_BUILDLOGS_CENTOS_PROXY:-https://buildlogs.centos.org}"
NODEPOOL_CBS_CENTOS_PROXY="${NODEPOOL_CBS_CENTOS_PROXY:-https://cbs.centos.org/repos}"
OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF=${OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF}"\
    $REPO_PREFIX/$CEPH_REPO_FILE"
OPSTOOLS_REPO_ENABLED=${OPSTOOLS_REPO_ENABLED:-"0"}
if [[ "${OPSTOOLS_REPO_ENABLED}" = 1 ]]; then
    OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF=${OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF}"\
        $REPO_PREFIX/centos-opstools.repo"
fi
FEATURE_BRANCH=${FEATURE_BRANCH:-}
DELOREAN_SETUP=${DELOREAN_SETUP:-""}
@@ -250,7 +250,11 @@ function log {
}

function source_rc {
-    if [ $1 = "stackrc" ] ; then cloud="Undercloud"; else cloud="Overcloud"; fi
+    if [ $1 = "stackrc" ]; then
+        cloud="Undercloud"
+    else
+        cloud="Overcloud"
+    fi
    echo "You must source a $1 file for the $cloud."
    echo "Attempting to source $HOME/$1"
    source $HOME/$1
@@ -665,8 +669,7 @@ function overcloud_deploy {
    exitval=0
    log "Deploy command arguments: $OVERCLOUD_DEPLOY_ARGS"
    openstack overcloud deploy $OVERCLOUD_DEPLOY_ARGS || exitval=1
-    if [ $exitval -eq 1 ];
-    then
+    if [ $exitval -eq 1 ]; then
        log "Overcloud create - FAILED!"
        exit 1
    fi
@@ -713,8 +716,7 @@ function overcloud_update {
    log "Overcloud update started."
    exitval=0
    openstack overcloud deploy $OVERCLOUD_UPDATE_ARGS || exitval=1
-    if [ $exitval -eq 1 ];
-    then
+    if [ $exitval -eq 1 ]; then
        log "Overcloud update - FAILED!"
        exit 1
    fi

(modified file)

@@ -30,7 +30,7 @@ and Lauchpad API connections) and generate the CI reports which contact
the API of the upstream Jenkins servers.

If you want to do a quick build to test out new HTML formatting, etc. you
can disable the reviewday and CI reports by running the following:

    cd tripleo-ci/scripts/website
    SKIP\_REVIEWDAY="Y" SKIP\_CI\_REPORTS="Y" OUT\_HTML='out\_html' bash generate\_site.sh

(modified file)

@@ -24,32 +24,32 @@ SKIP_BLOG=${SKIP_BLOG:-''}
# TRIPLEO-DOCS
if [ ! -d tripleo-docs ]; then
    git clone git://git.openstack.org/openstack/tripleo-docs
    pushd tripleo-docs
    tox -edocs #initial run
    popd
else
    pushd tripleo-docs
    git reset --hard origin/master
    git pull
    # NOTE(bnemec): We need to rebuild this venv each time or changes to
    # tripleosphinx won't be picked up.
    tox -re docs
    popd
fi

# TRIPLEO SPHINX
if [ ! -d tripleosphinx ]; then
    git clone https://github.com/dprince/tripleosphinx.git
    pushd tripleosphinx
    tox -edocs #creates the blank.html
    popd
else
    pushd tripleosphinx
    git reset --hard origin/master
    git pull
    tox -edocs #creates the blank.html
    popd
fi

# swap in custom tripleosphinx
@@ -60,33 +60,33 @@ popd
#REVIEWDAY
if [ ! -d reviewday ]; then
    git clone git://git.openstack.org/openstack-infra/reviewday
else
    pushd reviewday
    git reset --hard origin/master
    git pull
    popd
fi

#TRIPLEO CI
if [ ! -d tripleo-ci ]; then
    git clone git://git.openstack.org/openstack-infra/tripleo-ci
else
    pushd tripleo-ci
    git reset --hard origin/master
    git pull
    popd
fi

#Planet (Blog Feed Aggregator)
PLANET_DIR='planet-venus'
if [ ! -d '$PLANET_DIR' ]; then
    git clone https://github.com/rubys/venus.git $PLANET_DIR
else
    pushd $PLANET_DIR
    git reset --hard origin/master
    git pull
    popd
fi

#-----------------------------------------
@@ -104,54 +104,54 @@ $SUDO_CP mkdir -p $OUT_HTML
# Reviewday
if [ -z "$SKIP_REVIEWDAY" ]; then
    pushd reviewday
    tox -erun -- "-p $REVIEWDAY_INPUT_FILE"
    $SUDO_CP cp -a arrow* out_report/*.png out_report/*.js out_report/*.css $OUT_HTML
    DATA=$(cat out_report/data_table.html)
    popd
    OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/reviews.html
    TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
    sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
    echo "<h1>TripleO Reviews</h1>" >> $OUT_FILE
    sed -e "s|<title>.*|<title>TripleO: Reviews</title>|" -i $OUT_FILE # custom title
    sed -e "s|<title>.*|<title>TripleO: Reviews</title><meta name='description' content='OpenStack Deployment Program Reviews'/>|" -i $OUT_FILE # custom title
    echo "$DATA" >> $OUT_FILE
    sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
fi

# TripleO CI
if [ -z "$SKIP_CI_REPORTS" ]; then
    pushd tripleo-ci
    # jobs report
    tox -ecireport -- -b '^.*'
    DATA=$(cat tripleo-jobs.html-table)
    popd
    OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/cistatus.html
    TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
    sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
    echo "<h1>TripleO CI Status</h1>" >> $OUT_FILE
    sed -e "s|<title>.*|<title>TripleO: CI Status</title><meta name='description' content='OpenStack Deployment Program CI Status results'/>|" -i $OUT_FILE # custom title
    echo "$DATA" >> $OUT_FILE
    sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
fi

# Planet
if [ -z "$SKIP_BLOG" ]; then
    cp $SCRIPT_DIR/tripleo-ci/scripts/website/planet* $SCRIPT_DIR/$PLANET_DIR
    pushd $SCRIPT_DIR/$PLANET_DIR
    mkdir output
    rm planet.html.tmplc # cleanup from previous runs
    python planet.py planet.config.ini
    popd
    DATA=$(cat $PLANET_DIR/output/planet.html)
    OUT_FILE=$SCRIPT_DIR/tripleo-docs/doc/build/html/planet.html
    TEMPLATE_FILE=$SCRIPT_DIR/tripleosphinx/doc/build/html/blank.html
    sed -n '1,/.*Custom Content Here/p' $TEMPLATE_FILE > $OUT_FILE #first half
    echo "<h1>Planet TripleO</h1>" >> $OUT_FILE
    sed -e "s|<title>.*|<title>Planet TripleO</title><meta name='description' content='OpenStack Deployment Program Planet'/>|" -i $OUT_FILE # custom title
    echo "$DATA" >> $OUT_FILE
    sed -n '/.*Custom Content Here/,$p' $TEMPLATE_FILE >> $OUT_FILE #second half
fi

# Copy in the new web pages

(modified file)

@@ -1,5 +1,6 @@
+---
parameter_defaults:
  ControlPlaneSubnetCidr: "24"
  ControlPlaneDefaultRoute: 192.168.24.1
  EC2MetadataIp: 192.168.24.1
-  DnsServers: ["8.8.8.8","8.8.4.4"]
+  DnsServers: ["8.8.8.8", "8.8.4.4"]

(modified file)

@@ -25,7 +25,7 @@ parameters:
    default: ''
    description: IP address/subnet on the tenant network
    type: string
  ManagementIpSubnet:  # Only populated when including environments/network-management.yaml
    default: ''
    description: IP address/subnet on the management network
    type: string
@@ -62,18 +62,18 @@ parameters:
    default: '10.0.0.1'
    description: default route for the external network
    type: string
  ControlPlaneSubnetCidr:  # Override this via parameter_defaults
    default: '24'
    description: The subnet CIDR of the control plane network.
    type: string
  ControlPlaneDefaultRoute:  # Override this via parameter_defaults
    description: The default route of the control plane network.
    type: string
  DnsServers:  # Override this via parameter_defaults
    default: []
    description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
    type: comma_delimited_list
  EC2MetadataIp:  # Override this via parameter_defaults
    description: The IP address of the EC2 metadata server.
    type: string

(modified file)

@@ -1,3 +1,4 @@
+---
parameter_defaults:
  ControllerSchedulerHints:
    'capabilities:node': 'controller-%index%'

(modified file)

@@ -1,8 +1,8 @@
-flake8
pytest
pytest-html
pytest-cov
mock
requests
pprint
+pre-commit>=1.10 # MIT License
PyYAML

(modified file)

@@ -21,9 +21,9 @@
import argparse
import json
import logging
-import sys
-import subprocess
import os
+import subprocess
+import sys
import tempfile
import textwrap
import threading
@@ -65,7 +65,8 @@ class TestCallback(object):
        if time_waiting > 90:
            logger.warn('%.1f seconds waiting for a worker.' % (time_waiting))

-        if "Couldn't retrieve env" in job.arguments or "Failed creating OVB stack" in job.arguments:
+        if "Couldn't retrieve env" in job.arguments or \
+           "Failed creating OVB stack" in job.arguments:
            logger.error(job.arguments)
            self.rv = 2
            job.sendWorkComplete("")
@@ -80,7 +81,7 @@ class TestCallback(object):
        try:
            self.rv = subprocess.call(self.command)
-        except:
+        except Exception:
            logger.exception("Error calling command")
            self.rv = 2
@@ -195,14 +196,14 @@ def main(args=sys.argv[1:]):
    job_params = {
        "callback_name": callback_name,
        "timeout": opts.timeout,
-        "envsize":opts.envsize,
-        "compute_envsize":opts.compute_envsize,
-        "ucinstance":opts.ucinstance,
+        "envsize": opts.envsize,
+        "compute_envsize": opts.compute_envsize,
+        "ucinstance": opts.ucinstance,
        "create_undercloud": "true" if opts.create_undercloud else "",
-        "ssh_key":opts.ssh_key,
-        "net_iso":opts.net_iso,
-        "extra_nodes":opts.extra_nodes,
-        "job_identifier":job_identifier,
+        "ssh_key": opts.ssh_key,
+        "net_iso": opts.net_iso,
+        "extra_nodes": opts.extra_nodes,
+        "job_identifier": job_identifier,
    }
    job = gear.Job('lockenv', json.dumps(job_params))
    client.submitJob(job)
@@ -227,5 +228,6 @@ def main(args=sys.argv[1:]):
    logger.debug("Exiting with status : %d", cb.rv)
    return cb.rv

if __name__ == '__main__':
    exit(main())

(modified file)

@@ -1,3 +1,4 @@
+---
# Collect logs settings
# artcl_tar_gz: true

(modified file)

@@ -1,3 +1,4 @@
+---
# TRIPLEO-CI environment settings
undercloud_user: "{{ lookup('env','USER') }}"
non_root_user: "{{ undercloud_user }}"

(modified file)

@@ -1,3 +1,4 @@
+---
undercloud_type: ovb
use_testenv_broker: true
build_test_packages: true

(modified file)

@@ -151,16 +151,16 @@ else
for playbook in $PLAYBOOKS; do
    echo "${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}"
    run_with_timeout $START_JOB_TIME $QUICKSTART_INSTALL_CMD \
        "${RELEASE_ARGS[$playbook]:=$QUICKSTART_DEFAULT_RELEASE_ARG}" \
        $NODES_ARGS \
        $FEATURESET_CONF \
        $ENV_VARS \
        $EXTRA_VARS \
        $VXLAN_VARS \
        $DEFAULT_ARGS \
        --extra-vars ci_job_end_time=$(( START_JOB_TIME + REMAINING_TIME*60 )) \
        $LOCAL_WORKING_DIR/playbooks/$playbook "${PLAYBOOKS_ARGS[$playbook]:-}" \
        2>&1 | tee -a $LOGS_DIR/quickstart_install.log && exit_value=0 || exit_value=$?
    # Print status of playbook run
    [[ "$exit_value" == 0 ]] && echo "Playbook run of $playbook passed successfully"

(modified file)

@@ -18,7 +18,8 @@ commands = pyflakes setup.py scripts
[testenv:linters]
basepython = python3
whitelist_externals = bash
-commands = flake8 --max-line-length 80 {toxinidir} {posargs}
+commands = python -m pre_commit run --source HEAD^ --origin HEAD

# deprecated: use linters instead. kept only as a convenience alias
[testenv:pep8]

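The linters environment now defers to pre-commit, checking only files touched between HEAD^ and HEAD; locally that is simply:

    tox -e linters
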
(modified file)

@@ -55,8 +55,8 @@
    gate:
      queue: tripleo
      jobs:
        # Don't put a files section on the linters job, otherwise no
        # jobs might be defined and nothing can merge in this repo.
        - openstack-tox-linters
        - openstack-tox-py27:
            files:

(modified file)

@@ -1,3 +1,4 @@
+---
- nodeset:
    name: two-centos-7-nodes
    nodes: