Add bashate checks to pep8 step

Similarly to pep8 checks, this allows enforcing a consistent
style of the shell scripts across modifications. For now
only the indentation is enforced to reduce code churn.

Closes-Bug: 1648099
Change-Id: Ie66cbe1aea4bd01a8bba8833ef6cbd2cff6a7c6a
This commit is contained in:
Dirk Mueller 2016-10-25 22:46:14 +02:00
parent 73c5f6ea4f
commit 80fc5a2d42
20 changed files with 118 additions and 113 deletions

View File

@ -53,10 +53,8 @@ echo "Allowed to introduce missing lines : ${ALLOWED_EXTRA_MISSING}"
echo "Missing lines in master : ${baseline_missing}" echo "Missing lines in master : ${baseline_missing}"
echo "Missing lines in proposed change : ${current_missing}" echo "Missing lines in proposed change : ${current_missing}"
if [ $allowed_missing -ge $current_missing ]; if [ $allowed_missing -ge $current_missing ]; then
then if [ $baseline_missing -lt $current_missing ]; then
if [ $baseline_missing -lt $current_missing ];
then
show_diff $baseline_report $current_report show_diff $baseline_report $current_report
echo "We believe you can test your code with 100% coverage!" echo "We believe you can test your code with 100% coverage!"
else else

View File

@ -3,38 +3,37 @@
. /etc/sysconfig/heat-params . /etc/sysconfig/heat-params
if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then
if [ "$ENABLE_CINDER" == "False" ]; then if [ "$ENABLE_CINDER" == "False" ]; then
# FIXME(yuanying): Use ephemeral disk for docker storage # FIXME(yuanying): Use ephemeral disk for docker storage
# Currently Ironic doesn't support cinder volumes, # Currently Ironic doesn't support cinder volumes,
# so we must use preserved ephemeral disk instead of a cinder volume. # so we must use preserved ephemeral disk instead of a cinder volume.
device_path=$(readlink -f /dev/disk/by-label/ephemeral0) device_path=$(readlink -f /dev/disk/by-label/ephemeral0)
else else
attempts=60 attempts=60
while [ ${attempts} -gt 0 ]; do while [ ${attempts} -gt 0 ]; do
device_name=$(ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20}$) device_name=$(ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20}$)
if [ -n "${device_name}" ]; then if [ -n "${device_name}" ]; then
break break
fi
echo "waiting for disk device"
sleep 0.5
udevadm trigger
let attempts--
done
if [ -z "${device_name}" ]; then
echo "ERROR: disk device does not exist" >&2
exit 1
fi fi
echo "waiting for disk device"
sleep 0.5
udevadm trigger
let attempts--
done
if [ -z "${device_name}" ]; then device_path=/dev/disk/by-id/${device_name}
echo "ERROR: disk device does not exist" >&2
exit 1
fi fi
device_path=/dev/disk/by-id/${device_name}
fi
fi fi
$configure_docker_storage_driver $configure_docker_storage_driver
if [ "$DOCKER_STORAGE_DRIVER" = "overlay" ]; then if [ "$DOCKER_STORAGE_DRIVER" = "overlay" ]; then
if [ $(echo -e "$(uname -r)\n3.18" | sort -V | head -1) \ if [ $(echo -e "$(uname -r)\n3.18" | sort -V | head -1) = $(uname -r) ]; then
= $(uname -r) ]; then
ERROR_MESSAGE="OverlayFS requires at least Linux kernel 3.18. Cluster node kernel version: $(uname -r)" ERROR_MESSAGE="OverlayFS requires at least Linux kernel 3.18. Cluster node kernel version: $(uname -r)"
echo "ERROR: ${ERROR_MESSAGE}" >&2 echo "ERROR: ${ERROR_MESSAGE}" >&2
sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"${ERROR_MESSAGE}\"}'" sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"${ERROR_MESSAGE}\"}'"

View File

@ -3,8 +3,8 @@
. /etc/sysconfig/heat-params . /etc/sysconfig/heat-params
if [ -z "$KUBE_NODE_IP" ]; then if [ -z "$KUBE_NODE_IP" ]; then
# FIXME(yuanying): Set KUBE_NODE_IP correctly # FIXME(yuanying): Set KUBE_NODE_IP correctly
KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
fi fi
myip="${KUBE_NODE_IP}" myip="${KUBE_NODE_IP}"

View File

@ -5,16 +5,16 @@
echo "configuring kubernetes (master)" echo "configuring kubernetes (master)"
if [ -z "$KUBE_NODE_IP" ]; then if [ -z "$KUBE_NODE_IP" ]; then
# FIXME(yuanying): Set KUBE_NODE_IP correctly # FIXME(yuanying): Set KUBE_NODE_IP correctly
KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
fi fi
sed -i ' sed -i '
/^ETCD_LISTEN_CLIENT_URLS=/ s/=.*/="http:\/\/0.0.0.0:2379"/ /^ETCD_LISTEN_CLIENT_URLS=/ s/=.*/="http:\/\/0.0.0.0:2379"/
' /etc/etcd/etcd.conf ' /etc/etcd/etcd.conf
sed -i ' sed -i '
/^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/ /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/
' /etc/kubernetes/config ' /etc/kubernetes/config
KUBE_API_ARGS="--runtime-config=api/all=true" KUBE_API_ARGS="--runtime-config=api/all=true"
@ -30,11 +30,11 @@ else
fi fi
sed -i ' sed -i '
/^KUBE_API_ADDRESS=/ s/=.*/='"${KUBE_API_ADDRESS}"'/ /^KUBE_API_ADDRESS=/ s/=.*/='"${KUBE_API_ADDRESS}"'/
/^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"| /^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"|
/^KUBE_API_ARGS=/ s/KUBE_API_ARGS.// /^KUBE_API_ARGS=/ s/KUBE_API_ARGS.//
/^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd-servers=http:\/\/127.0.0.1:2379"/ /^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd-servers=http:\/\/127.0.0.1:2379"/
/^KUBE_ADMISSION_CONTROL=/ s/=.*/=""/ /^KUBE_ADMISSION_CONTROL=/ s/=.*/=""/
' /etc/kubernetes/apiserver ' /etc/kubernetes/apiserver
cat << _EOC_ >> /etc/kubernetes/apiserver cat << _EOC_ >> /etc/kubernetes/apiserver
#Uncomment the following line to disable Load Balancer feature #Uncomment the following line to disable Load Balancer feature
@ -44,8 +44,8 @@ KUBE_API_ARGS="$KUBE_API_ARGS"
_EOC_ _EOC_
sed -i ' sed -i '
/^KUBELET_ADDRESSES=/ s/=.*/="--machines='""'"/ /^KUBELET_ADDRESSES=/ s/=.*/="--machines='""'"/
/^KUBE_CONTROLLER_MANAGER_ARGS=/ s/KUBE_CONTROLLER_MANAGER_ARGS.*/#Uncomment the following line to enable Kubernetes Load Balancer feature \n#KUBE_CONTROLLER_MANAGER_ARGS="--cloud-config=\/etc\/sysconfig\/kube_openstack_config --cloud-provider=openstack"/ /^KUBE_CONTROLLER_MANAGER_ARGS=/ s/KUBE_CONTROLLER_MANAGER_ARGS.*/#Uncomment the following line to enable Kubernetes Load Balancer feature \n#KUBE_CONTROLLER_MANAGER_ARGS="--cloud-config=\/etc\/sysconfig\/kube_openstack_config --cloud-provider=openstack"/
' /etc/kubernetes/controller-manager ' /etc/kubernetes/controller-manager
KUBELET_ARGS="--register-node=true --register-schedulable=false --config=/etc/kubernetes/manifests --hostname-override=$KUBE_NODE_IP" KUBELET_ARGS="--register-node=true --register-schedulable=false --config=/etc/kubernetes/manifests --hostname-override=$KUBE_NODE_IP"
@ -56,7 +56,7 @@ if [ -n "${INSECURE_REGISTRY_URL}" ]; then
fi fi
sed -i ' sed -i '
/^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/ /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
/^KUBELET_HOSTNAME=/ s/=.*/=""/ /^KUBELET_HOSTNAME=/ s/=.*/=""/
/^KUBELET_ARGS=/ s|=.*|='"$KUBELET_ARGS"'| /^KUBELET_ARGS=/ s|=.*|='"$KUBELET_ARGS"'|
' /etc/kubernetes/kubelet ' /etc/kubernetes/kubelet

View File

@ -5,8 +5,8 @@
echo "configuring kubernetes (minion)" echo "configuring kubernetes (minion)"
if [ -z "$KUBE_NODE_IP" ]; then if [ -z "$KUBE_NODE_IP" ]; then
# FIXME(yuanying): Set KUBE_NODE_IP correctly # FIXME(yuanying): Set KUBE_NODE_IP correctly
KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
fi fi
ETCD_SERVER_IP=${ETCD_SERVER_IP:-$KUBE_MASTER_IP} ETCD_SERVER_IP=${ETCD_SERVER_IP:-$KUBE_MASTER_IP}
@ -20,9 +20,9 @@ fi
KUBE_MASTER_URI="$KUBE_PROTOCOL://$KUBE_MASTER_IP:$KUBE_API_PORT" KUBE_MASTER_URI="$KUBE_PROTOCOL://$KUBE_MASTER_IP:$KUBE_API_PORT"
sed -i ' sed -i '
/^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/ /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/
/^KUBE_ETCD_SERVERS=/ s|=.*|="--etcd-servers=http://'"$ETCD_SERVER_IP"':2379"| /^KUBE_ETCD_SERVERS=/ s|=.*|="--etcd-servers=http://'"$ETCD_SERVER_IP"':2379"|
/^KUBE_MASTER=/ s|=.*|="--master='"$KUBE_MASTER_URI"'"| /^KUBE_MASTER=/ s|=.*|="--master='"$KUBE_MASTER_URI"'"|
' /etc/kubernetes/config ' /etc/kubernetes/config
# NOTE: Kubernetes plugin for Openstack requires that the node name registered # NOTE: Kubernetes plugin for Openstack requires that the node name registered
@ -40,19 +40,19 @@ if [ -n "${INSECURE_REGISTRY_URL}" ]; then
fi fi
sed -i ' sed -i '
/^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/ /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
/^KUBELET_HOSTNAME=/ s/=.*/=""/ /^KUBELET_HOSTNAME=/ s/=.*/=""/
/^KUBELET_API_SERVER=/ s|=.*|="--api-servers='"$KUBE_MASTER_URI"'"| /^KUBELET_API_SERVER=/ s|=.*|="--api-servers='"$KUBE_MASTER_URI"'"|
/^KUBELET_ARGS=/ s|=.*|="'"${KUBELET_ARGS}"'"| /^KUBELET_ARGS=/ s|=.*|="'"${KUBELET_ARGS}"'"|
' /etc/kubernetes/kubelet ' /etc/kubernetes/kubelet
sed -i ' sed -i '
/^KUBE_PROXY_ARGS=/ s|=.*|='"$KUBE_CONFIG"'| /^KUBE_PROXY_ARGS=/ s|=.*|='"$KUBE_CONFIG"'|
' /etc/kubernetes/proxy ' /etc/kubernetes/proxy
if [ "$NETWORK_DRIVER" = "flannel" ]; then if [ "$NETWORK_DRIVER" = "flannel" ]; then
sed -i ' sed -i '
/^FLANNEL_ETCD=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"| /^FLANNEL_ETCD=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"|
' /etc/sysconfig/flanneld ' /etc/sysconfig/flanneld
# Make sure etcd has a flannel configuration # Make sure etcd has a flannel configuration

View File

@ -4,5 +4,5 @@
setenforce 0 setenforce 0
sed -i ' sed -i '
/^SELINUX=/ s/=.*/=permissive/ /^SELINUX=/ s/=.*/=permissive/
' /etc/selinux/config ' /etc/selinux/config

View File

@ -69,8 +69,8 @@ USER_TOKEN=`curl -k -s -i -X POST -H "$content_type" -d "$auth_json" $url \
# Get CA certificate for this cluster # Get CA certificate for this cluster
curl -k -X GET \ curl -k -X GET \
-H "X-Auth-Token: $USER_TOKEN" \ -H "X-Auth-Token: $USER_TOKEN" \
$MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT
# Create config for client's csr # Create config for client's csr
cat > ${cert_conf_dir}/client.conf <<EOF cat > ${cert_conf_dir}/client.conf <<EOF
@ -111,7 +111,7 @@ chmod 500 "${cert_dir}"
chown -R kube:kube "${cert_dir}" chown -R kube:kube "${cert_dir}"
sed -i ' sed -i '
s|CA_CERT|'"$CA_CERT"'| s|CA_CERT|'"$CA_CERT"'|
s|CLIENT_CERT|'"$CLIENT_CERT"'| s|CLIENT_CERT|'"$CLIENT_CERT"'|
s|CLIENT_KEY|'"$CLIENT_KEY"'| s|CLIENT_KEY|'"$CLIENT_KEY"'|
' /srv/kubernetes/kubeconfig.yaml ' /srv/kubernetes/kubeconfig.yaml

View File

@ -21,7 +21,7 @@ set -o nounset
set -o pipefail set -o pipefail
if [ "$TLS_DISABLED" == "True" ]; then if [ "$TLS_DISABLED" == "True" ]; then
exit 0 exit 0
fi fi
if [[ -z "${KUBE_NODE_PUBLIC_IP}" ]]; then if [[ -z "${KUBE_NODE_PUBLIC_IP}" ]]; then
@ -33,11 +33,11 @@ fi
sans="IP:${KUBE_NODE_PUBLIC_IP},IP:${KUBE_NODE_IP}" sans="IP:${KUBE_NODE_PUBLIC_IP},IP:${KUBE_NODE_IP}"
if [ "${KUBE_NODE_PUBLIC_IP}" != "${KUBE_API_PUBLIC_ADDRESS}" ] \ if [ "${KUBE_NODE_PUBLIC_IP}" != "${KUBE_API_PUBLIC_ADDRESS}" ] \
&& [ -n "${KUBE_API_PUBLIC_ADDRESS}" ]; then && [ -n "${KUBE_API_PUBLIC_ADDRESS}" ]; then
sans="${sans},IP:${KUBE_API_PUBLIC_ADDRESS}" sans="${sans},IP:${KUBE_API_PUBLIC_ADDRESS}"
fi fi
if [ "${KUBE_NODE_IP}" != "${KUBE_API_PRIVATE_ADDRESS}" ] \ if [ "${KUBE_NODE_IP}" != "${KUBE_API_PRIVATE_ADDRESS}" ] \
&& [ -n "${KUBE_API_PRIVATE_ADDRESS}" ]; then && [ -n "${KUBE_API_PRIVATE_ADDRESS}" ]; then
sans="${sans},IP:${KUBE_API_PRIVATE_ADDRESS}" sans="${sans},IP:${KUBE_API_PRIVATE_ADDRESS}"
fi fi
MASTER_HOSTNAME=${MASTER_HOSTNAME:-} MASTER_HOSTNAME=${MASTER_HOSTNAME:-}
@ -91,8 +91,8 @@ USER_TOKEN=`curl -k -s -i -X POST -H "$content_type" -d "$auth_json" $url \
# Get CA certificate for this cluster # Get CA certificate for this cluster
curl -k -X GET \ curl -k -X GET \
-H "X-Auth-Token: $USER_TOKEN" \ -H "X-Auth-Token: $USER_TOKEN" \
$MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT} $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT}
# Create config for server's csr # Create config for server's csr
cat > ${cert_conf_dir}/server.conf <<EOF cat > ${cert_conf_dir}/server.conf <<EOF

View File

@ -28,7 +28,7 @@ fi
echo "creating flanneld config in etcd" echo "creating flanneld config in etcd"
while ! curl -sf -L $FLANNEL_ETCD/v2/keys${FLANNEL_ETCD_KEY}/config \ while ! curl -sf -L $FLANNEL_ETCD/v2/keys${FLANNEL_ETCD_KEY}/config \
-X PUT --data-urlencode value@${FLANNEL_JSON}; do -X PUT --data-urlencode value@${FLANNEL_JSON}; do
echo "waiting for etcd" echo "waiting for etcd"
sleep 1 sleep 1
done done

View File

@ -12,7 +12,7 @@ FLANNEL_JSON=/etc/sysconfig/flannel-network.json
FLANNELD_CONFIG=/etc/sysconfig/flanneld FLANNELD_CONFIG=/etc/sysconfig/flanneld
sed -i ' sed -i '
/^FLANNEL_ETCD=/ s/=.*/="http:\/\/127.0.0.1:2379"/ /^FLANNEL_ETCD=/ s/=.*/="http:\/\/127.0.0.1:2379"/
' /etc/sysconfig/flanneld ' /etc/sysconfig/flanneld
# Generate a flannel configuration that we will # Generate a flannel configuration that we will

View File

@ -14,7 +14,7 @@ FLANNEL_CONFIG_SERVICE=/etc/systemd/system/flannel-config.service
FLANNEL_JSON=/etc/sysconfig/flannel-network.json FLANNEL_JSON=/etc/sysconfig/flannel-network.json
sed -i ' sed -i '
/^FLANNEL_ETCD=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"| /^FLANNEL_ETCD=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"|
' $FLANNELD_CONFIG ' $FLANNELD_CONFIG
. $FLANNELD_CONFIG . $FLANNELD_CONFIG
@ -35,7 +35,7 @@ fi
echo "creating flanneld config in etcd" echo "creating flanneld config in etcd"
while ! curl -sf -L $FLANNEL_ETCD/v2/keys${FLANNEL_ETCD_KEY}/config \ while ! curl -sf -L $FLANNEL_ETCD/v2/keys${FLANNEL_ETCD_KEY}/config \
-X PUT --data-urlencode value@${FLANNEL_JSON}; do -X PUT --data-urlencode value@${FLANNEL_JSON}; do
echo "waiting for etcd" echo "waiting for etcd"
sleep 1 sleep 1
done done

View File

@ -5,8 +5,8 @@
echo "Configuring ${NETWORK_DRIVER} network service ..." echo "Configuring ${NETWORK_DRIVER} network service ..."
if [ "$NETWORK_DRIVER" == "docker" ]; then if [ "$NETWORK_DRIVER" == "docker" ]; then
DOCKER_NETWORK_OPTIONS="--cluster-store etcd://$ETCD_SERVER_IP:2379\ DOCKER_NETWORK_OPTIONS="--cluster-store etcd://$ETCD_SERVER_IP:2379 \
--cluster-advertise $SWARM_NODE_IP:9379" --cluster-advertise $SWARM_NODE_IP:9379"
sed -i "/^DOCKER_NETWORK_OPTIONS=/ s#=.*#='$DOCKER_NETWORK_OPTIONS'#" \ sed -i "/^DOCKER_NETWORK_OPTIONS=/ s#=.*#='$DOCKER_NETWORK_OPTIONS'#" \
/etc/sysconfig/docker-network /etc/sysconfig/docker-network
fi fi

View File

@ -48,7 +48,7 @@ After=docker.service
RemainAfterExit=yes RemainAfterExit=yes
ExecStartPre=-/usr/bin/docker pull openstackmagnum/rexray:alpine ExecStartPre=-/usr/bin/docker pull openstackmagnum/rexray:alpine
ExecStart=/usr/bin/rm -f /var/run/rexray/rexray.pid && \ ExecStart=/usr/bin/rm -f /var/run/rexray/rexray.pid && \
/usr/bin/docker run -d --name=rexray --privileged -p 7979:7979 \ /usr/bin/docker run -d --name=rexray --privileged -p 7979:7979 \
-v /run/docker/plugins:/run/docker/plugins \ -v /run/docker/plugins:/run/docker/plugins \
-v /var/lib/rexray:/var/lib/rexray:z \ -v /var/lib/rexray:/var/lib/rexray:z \
-v /var/log/rexray:/var/log/rexray \ -v /var/log/rexray:/var/log/rexray \

View File

@ -20,25 +20,25 @@ EnvironmentFile=-/etc/sysconfig/docker-storage
EnvironmentFile=-/etc/sysconfig/docker-network EnvironmentFile=-/etc/sysconfig/docker-network
Environment=GOTRACEBACK=crash Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/docker daemon -H fd:// \\ ExecStart=/usr/bin/docker daemon -H fd:// \\
-H tcp://0.0.0.0:2375 \\ -H tcp://0.0.0.0:2375 \\
END_SERVICE_TOP END_SERVICE_TOP
if [ "$TLS_DISABLED" = 'False' ]; then if [ "$TLS_DISABLED" = 'False' ]; then
cat >> /etc/systemd/system/docker.service << END_TLS cat >> /etc/systemd/system/docker.service << END_TLS
--tlsverify \\ --tlsverify \\
--tlscacert="/etc/docker/ca.crt" \\ --tlscacert="/etc/docker/ca.crt" \\
--tlskey="/etc/docker/server.key" \\ --tlskey="/etc/docker/server.key" \\
--tlscert="/etc/docker/server.crt" \\ --tlscert="/etc/docker/server.crt" \\
END_TLS END_TLS
fi fi
cat >> /etc/systemd/system/docker.service << END_SERVICE_BOTTOM cat >> /etc/systemd/system/docker.service << END_SERVICE_BOTTOM
\$OPTIONS \\ \$OPTIONS \\
\$DOCKER_STORAGE_OPTIONS \\ \$DOCKER_STORAGE_OPTIONS \\
\$DOCKER_NETWORK_OPTIONS \\ \$DOCKER_NETWORK_OPTIONS \\
\$INSECURE_REGISTRY \$INSECURE_REGISTRY
LimitNOFILE=1048576 LimitNOFILE=1048576
LimitNPROC=1048576 LimitNPROC=1048576
LimitCORE=infinity LimitCORE=infinity

View File

@ -18,14 +18,14 @@ TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill swarm-agent ExecStartPre=-/usr/bin/docker kill swarm-agent
ExecStartPre=-/usr/bin/docker rm swarm-agent ExecStartPre=-/usr/bin/docker rm swarm-agent
ExecStartPre=-/usr/bin/docker pull swarm:$SWARM_VERSION ExecStartPre=-/usr/bin/docker pull swarm:$SWARM_VERSION
ExecStart=/usr/bin/docker run -e http_proxy=$HTTP_PROXY \\ ExecStart=/usr/bin/docker run -e http_proxy=$HTTP_PROXY \\
-e https_proxy=$HTTPS_PROXY \\ -e https_proxy=$HTTPS_PROXY \\
-e no_proxy=$NO_PROXY \\ -e no_proxy=$NO_PROXY \\
--name swarm-agent \\ --name swarm-agent \\
swarm:$SWARM_VERSION \\ swarm:$SWARM_VERSION \\
join \\ join \\
--addr $myip:2375 \\ --addr $myip:2375 \\
etcd://$ETCD_SERVER_IP:2379/v2/keys/swarm/ etcd://$ETCD_SERVER_IP:2379/v2/keys/swarm/
Restart=always Restart=always
ExecStop=/usr/bin/docker stop swarm-agent ExecStop=/usr/bin/docker stop swarm-agent
ExecStartPost=/usr/local/bin/notify-heat ExecStartPost=/usr/local/bin/notify-heat
@ -42,10 +42,10 @@ SCRIPT=/usr/local/bin/notify-heat
cat > $SCRIPT << EOF cat > $SCRIPT << EOF
#!/bin/sh #!/bin/sh
until etcdctl \ until etcdctl \
--peers $ETCD_SERVER_IP:2379 \ --peers $ETCD_SERVER_IP:2379 \
--timeout 1s \ --timeout 1s \
--total-timeout 5s \ --total-timeout 5s \
ls /v2/keys/swarm/docker/swarm/nodes/$myip:2375 ls /v2/keys/swarm/docker/swarm/nodes/$myip:2375
do do
echo "Waiting for swarm agent registration..." echo "Waiting for swarm agent registration..."
sleep 5 sleep 5

View File

@ -12,26 +12,26 @@ TimeoutStartSec=0
ExecStartPre=-/usr/bin/docker kill swarm-manager ExecStartPre=-/usr/bin/docker kill swarm-manager
ExecStartPre=-/usr/bin/docker rm swarm-manager ExecStartPre=-/usr/bin/docker rm swarm-manager
ExecStartPre=-/usr/bin/docker pull swarm:$SWARM_VERSION ExecStartPre=-/usr/bin/docker pull swarm:$SWARM_VERSION
ExecStart=/usr/bin/docker run --name swarm-manager \\ ExecStart=/usr/bin/docker run --name swarm-manager \\
-v /etc/docker:/etc/docker \\ -v /etc/docker:/etc/docker \\
-p 2376:2375 \\ -p 2376:2375 \\
-e http_proxy=$HTTP_PROXY \\ -e http_proxy=$HTTP_PROXY \\
-e https_proxy=$HTTPS_PROXY \\ -e https_proxy=$HTTPS_PROXY \\
-e no_proxy=$NO_PROXY \\ -e no_proxy=$NO_PROXY \\
swarm:$SWARM_VERSION \\ swarm:$SWARM_VERSION \\
manage -H tcp://0.0.0.0:2375 \\ manage -H tcp://0.0.0.0:2375 \\
--strategy $SWARM_STRATEGY \\ --strategy $SWARM_STRATEGY \\
--replication \\ --replication \\
--advertise $NODE_IP:2376 \\ --advertise $NODE_IP:2376 \\
END_SERVICE_TOP END_SERVICE_TOP
if [ $TLS_DISABLED = 'False' ]; then if [ $TLS_DISABLED = 'False' ]; then
cat >> /etc/systemd/system/swarm-manager.service << END_TLS cat >> /etc/systemd/system/swarm-manager.service << END_TLS
--tlsverify \\ --tlsverify \\
--tlscacert=/etc/docker/ca.crt \\ --tlscacert=/etc/docker/ca.crt \\
--tlskey=/etc/docker/server.key \\ --tlskey=/etc/docker/server.key \\
--tlscert=/etc/docker/server.crt \\ --tlscert=/etc/docker/server.crt \\
END_TLS END_TLS
fi fi
@ -41,8 +41,8 @@ cat >> /etc/systemd/system/swarm-manager.service << END_SERVICE_BOTTOM
ExecStop=/usr/bin/docker stop swarm-manager ExecStop=/usr/bin/docker stop swarm-manager
Restart=always Restart=always
ExecStartPost=/usr/bin/curl -k -i -X POST -H 'Content-Type: application/json' -H 'X-Auth-Token: $WAIT_HANDLE_TOKEN' \\ ExecStartPost=/usr/bin/curl -k -i -X POST -H 'Content-Type: application/json' -H 'X-Auth-Token: $WAIT_HANDLE_TOKEN' \\
--data-binary "'"'{"Status": "SUCCESS", "Reason": "Setup complete", "Data": "OK", "UniqueId": "00000"}'"'" \\ --data-binary "'"'{"Status": "SUCCESS", "Reason": "Setup complete", "Data": "OK", "UniqueId": "00000"}'"'" \\
"$WAIT_HANDLE_ENDPOINT" "$WAIT_HANDLE_ENDPOINT"
[Install] [Install]
WantedBy=multi-user.target WantedBy=multi-user.target

View File

@ -5,7 +5,7 @@
echo "Configuring mesos (master)" echo "Configuring mesos (master)"
myip=$(ip addr show eth0 | myip=$(ip addr show eth0 |
awk '$1 == "inet" {print $2}' | cut -f1 -d/) awk '$1 == "inet" {print $2}' | cut -f1 -d/)
# Fix /etc/hosts # Fix /etc/hosts
sed -i "s/127.0.1.1/$myip/" /etc/hosts sed -i "s/127.0.1.1/$myip/" /etc/hosts

View File

@ -5,7 +5,7 @@
echo "Configuring mesos (slave)" echo "Configuring mesos (slave)"
myip=$(ip addr show eth0 | myip=$(ip addr show eth0 |
awk '$1 == "inet" {print $2}' | cut -f1 -d/) awk '$1 == "inet" {print $2}' | cut -f1 -d/)
zk="" zk=""
for master_ip in $MESOS_MASTERS_IPS; do for master_ip in $MESOS_MASTERS_IPS; do

View File

@ -6,6 +6,7 @@
# ascii betical order. # ascii betical order.
bandit>=1.1.0 # Apache-2.0 bandit>=1.1.0 # Apache-2.0
bashate>=0.2 # Apache-2.0
coverage>=4.0 # Apache-2.0 coverage>=4.0 # Apache-2.0
doc8 # Apache-2.0 doc8 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD fixtures>=3.0.0 # Apache-2.0/BSD

View File

@ -98,6 +98,13 @@ commands =
doc8 -e .rst specs/ doc/source/ contrib/ CONTRIBUTING.rst HACKING.rst README.rst doc8 -e .rst specs/ doc/source/ contrib/ CONTRIBUTING.rst HACKING.rst README.rst
bash tools/flake8wrap.sh {posargs} bash tools/flake8wrap.sh {posargs}
bandit -r magnum -x tests -n5 -ll bandit -r magnum -x tests -n5 -ll
bash -c "find {toxinidir} \
-not \( -type d -name .?\* -prune \) \
-not \( -type d -name doc -prune \) \
-not \( -type d -name contrib -prune \) \
-type f \
-name \*.sh \
-print0 | xargs -0 bashate -v -iE006,E010,E042 -eE005"
[testenv:venv] [testenv:venv]
commands = {posargs} commands = {posargs}