Adds a Bashate target to tox.ini.

This runs bashate on all shell scripts in the repository. The check
ignores the following bashate errors:

- E006: Line longer than 79 columns (many scripts use Jinja
        templating, so keeping every line under 79 columns is
        impractical)
- E040: Syntax error determined using `bash -n` (many scripts use
        Jinja templating, so `bash -n` would often fail on them; real
        syntax errors surface at execution time anyway)
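
For reference, the same check can be run by hand with bashate's
--ignore flag (a sketch only; the script path below is illustrative,
not a file in this repository):

    # Hypothetical example: lint a single script with the same ignore list
    bashate --verbose --ignore=E006,E040 path/to/some-script.sh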

Change-Id: I6f6d454ac54a0ac98d8b8a092fa522cba092a592
Signed-off-by: Gael Chamoulaud <gchamoul@redhat.com>
Gael Chamoulaud 2017-02-06 15:01:25 +01:00
parent 619c512594
commit 5533b8f7a1
12 changed files with 57 additions and 34 deletions


@@ -11,8 +11,7 @@ BUILD_RESULT=$?
UNCOMMITTED_NOTES=$(git status --porcelain | \
    awk '$1 == "M" && $2 ~ /releasenotes\/notes/ {print $2}')
-if [ "${UNCOMMITTED_NOTES}" ]
-then
+if [ "${UNCOMMITTED_NOTES}" ]; then
    cat <<EOF
REMINDER: The following changes to release notes have not been committed:
@@ -25,4 +24,4 @@ committed changes, not the working directory.
EOF
fi
exit ${BUILD_RESULT}


@@ -11,8 +11,8 @@ set -eux
## ::
{% for interface in (mtu_interface) %}
ip link set {{ interface }} mtu {{ mtu }}
echo "MTU={{ mtu }}" >> /etc/sysconfig/network-scripts/ifcfg-{{ interface }}
{% endfor %}
## * Modify dnsmasq-ironic.conf


@@ -30,8 +30,8 @@ neutron subnet-show $subnet_uuid
## ::
if ! [[ -e {{ container_image }} ]]; then
    curl -O {{ container_url }}
    gunzip -f "{{ container_image }}.gz"
fi
glance image-list | grep atomic-image || glance image-create --name atomic-image --file {{ container_image }} --disk-format qcow2 --container-format bare


@@ -28,7 +28,10 @@ openstack overcloud image upload {% if bash_deploy_ramdisk %}--old-deploy-image{
## * Upload images to glance, this step is specific to nodepool based deployments.
## ::
-glance image-create --container-format bare --disk-format qcow2 --name overcloud-full --file overcloud-full.qcow2
+glance image-create --container-format bare \
+    --disk-format qcow2 \
+    --name overcloud-full \
+    --file overcloud-full.qcow2
{% endif %}


@@ -32,9 +32,9 @@ sudo update-ca-trust extract
{% set _vip = overcloud_public_vip if not overcloud_ipv6|bool else overcloud_public_vip6 %}
openssl req -newkey rsa:2048 -days 365 \
    -nodes -keyout {{ working_dir }}/server-key.pem \
    -out {{ working_dir }}/server-req.pem \
    -subj "/C=US/ST=NC/L=Raleigh/O=Red Hat/OU=OOOQ/CN={{_vip}}"
## * Process the server RSA key
## ::
@@ -47,8 +47,8 @@ openssl rsa -in {{ working_dir }}/server-key.pem \
## ::
openssl x509 -req -in server-req.pem -days 365 \
    -CA {{ working_dir }}/overcloud-cacert.pem \
    -CAkey {{ working_dir }}/overcloud-ca-privkey.pem \
    -set_serial 01 -out {{ working_dir }}/server-cert.pem
## --stop_docs


@@ -9,13 +9,13 @@ onerror(){
    --nested-depth 5 overcloud | grep FAIL |
    grep 'StructuredDeployment ' | cut -d '|' -f3)
    do echo -n "heat deployment-show out put for deployment: $failed" >> failed_upgrade.log
    echo -n "######################################################" >> failed_upgrade.log
    heat deployment-show $failed >> failed_upgrade.log
    echo -n "######################################################" >> failed_upgrade.log
    echo "puppet standard error for deployment: $failed" >> failed_upgrade.log
    echo -n "######################################################" >> failed_upgrade.log
    echo -e $(heat deployment-show $failed | jq .output_values.deploy_stderr) >> failed_upgrade.log
    echo -n "######################################################" >> failed_upgrade.log
    done
    exit 1
}


@@ -15,15 +15,15 @@ while read sub; do
## * Create the expected directories and symlinks
## ::
    ssh $SSH_ARGS $sub mkdir -p $WORKSPACE/tripleo
    ssh $SSH_ARGS $sub ln -sf $WORKSPACE/tripleo $WORKSPACE/tripleo/new
## * Clone the appropriate repositories in the expected locations
## ::
    ssh $SSH_ARGS $sub git clone https://git.openstack.org/openstack-infra/tripleo-ci $WORKSPACE/tripleo/tripleo-ci
    ssh $SSH_ARGS $sub git clone https://git.openstack.org/openstack-dev/devstack $WORKSPACE/tripleo/devstack
    ssh $SSH_ARGS $sub git clone https://git.openstack.org/openstack-infra/devstack-gate $WORKSPACE/tripleo/devstack-gate
done < /etc/nodepool/sub_nodes_private


@@ -10,12 +10,16 @@ CONTROLLER0=$(nova list | grep controller-0 | awk '{print $12}' | cut -f2 -d=)
{% if release == 'newton' or release == 'mitaka' %}
# Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1348222
-for CONTROLLER in $CONTROLLERS; do $SSH heat-admin@$CONTROLLER sudo pip install redis; done
+for CONTROLLER in $CONTROLLERS; do
+    $SSH heat-admin@$CONTROLLER sudo pip install redis;
+done
{% endif %}
{% if release == 'mitaka' %}
# Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1357229
-for CONTROLLER in $CONTROLLERS; do $SSH heat-admin@$CONTROLLER "sudo sed -i -e 's/^After=.*/After=syslog.target network.target/g' /usr/lib/systemd/system/openstack-heat-engine.service"; done
+for CONTROLLER in $CONTROLLERS; do
+    $SSH heat-admin@$CONTROLLER "sudo sed -i -e 's/^After=.*/After=syslog.target network.target/g' /usr/lib/systemd/system/openstack-heat-engine.service";
+done
{% endif %}
{% if release == 'newton' or release == 'mitaka' %}


@@ -31,8 +31,8 @@ git clone "https://github.com/rthallisey/clapper.git" "{{ working_dir }}/clapper
export IPMI_VALIDATE="$(python {{ working_dir }}/clapper/instackenv-validator.py -f {{ working_dir }}/instackenv.json)"
if [[ $IPMI_VALIDATE != *SUCCESS* ]] ; then
    echo "instackenv.json did not validate."
    echo $IPMI_VALIDATE
fi
### --stop_docs


@@ -12,11 +12,11 @@ set -eux
## * Create cleanup env function for heat
## ::
-function cleanup() {
+function cleanup {
{% if release in ['kilo', 'liberty', 'rhos-7'] %}
    heat stack-delete {{ validate_stack_name }}
{% else %}
    openstack stack delete --yes {{ validate_stack_name }}
{% endif %}
if [[ $(tripleo wait_for -w 300 -d 30 -s "Stack not found" -- "heat stack-show {{ validate_stack_name }}") ]]; then
    echo "openstack stack delete"
@@ -25,7 +25,7 @@ function cleanup() {
{% if release in ['kilo', 'liberty', 'rhos-7'] %}
    heat stack-delete {{ validate_stack_name }}
{% else %}
    openstack stack delete --yes {{ validate_stack_name }}
{% endif %}
fi
fi
@@ -104,7 +104,7 @@ heat stack-create {{ validate_stack_name }} --template-file {{ working_dir }}/t
### --stop_docs
/bin/bash /usr/libexec/openstack-tripleo/wait_for -w 600 --delay 30 \
    --success-match {{ validate_success_status }} -- heat stack-show {{ validate_stack_name }}
sleep 30
### --start_docs


@@ -1,5 +1,6 @@
hacking<0.11,>=0.10
+bashate>=0.2 # Apache-2.0
ansible-lint
sphinx!=1.3b1,<1.4,>=1.2.1 # BSD
oslosphinx>=4.7.0 # Apache-2.0

tox.ini

@@ -21,6 +21,21 @@ commands = bindep test
[testenv:docs]
commands = python setup.py build_sphinx
+[testenv:bashate]
+commands =
+    # Run bashate check for all bash scripts
+    # Ignores the following rules:
+    #     E006: Line longer than 79 columns (as many scripts use jinja
+    #           templating, this is very difficult)
+    #     E040: Syntax error determined using `bash -n` (as many scripts
+    #           use jinja templating, this will often fail and the syntax
+    #           error will be discovered in execution anyway)
+    bash -c "grep --recursive --binary-files=without-match \
+        --files-with-match '^.!.*\(ba\)\?sh$' \
+        --exclude-dir .tox \
+        --exclude-dir .git \
+        {toxinidir} | xargs bashate --error . --verbose --ignore=E006,E040"
[testenv:pep8]
commands =
    # Run hacking/flake8 check for all python files
@@ -46,6 +61,7 @@ commands = bash -c ci-scripts/releasenotes_tox.sh
[testenv:linters]
commands =
+    {[testenv:bashate]commands}
    {[testenv:pep8]commands}
    {[testenv:ansible-lint]commands}
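
Assuming a standard tox setup, the new check can then be run on its
own or as part of the aggregate linters environment:

    # Run only the bashate check
    tox -e bashate

    # Run bashate together with the pep8 and ansible-lint checks
    tox -e linters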