43b33c7ed8
In change I2aae4e2fdfec526c835f8967b54e1db3757bca17 we did the
following:
-pacemaker_status=$(systemctl is-active pacemaker || :)
+pacemaker_status=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then
+    pacemaker_status=$(systemctl is-active pacemaker)
+fi
We did that due to LP#1668266: we did not want systemctl is-active to
fail on non-pacemaker nodes. The problem with the above hiera check is
that it also matches on pacemaker_remote nodes.
We cannot piggyback on the pacemaker_enabled hiera key because that is
true on all nodes. So let's make the test check only for the pacemaker
service without also matching pacemaker_remote. Tested with:
1) Test on a controller node with pacemaker service enabled
[root@overcloud-controller-0 ~]# hiera -c /etc/puppet/hiera.yaml -a service_names |grep '\bpacemaker\b'
"pacemaker",
[root@overcloud-controller-0 ~]# echo $?
0
2) Test on a compute node without pacemaker:
[root@overcloud-novacompute-0 puppet]# hiera -c /etc/puppet/hiera.yaml service_names |grep '\bpacemaker\b'
[root@overcloud-novacompute-0 puppet]# echo $?
1
3) Test on a node with pacemaker_remote in the service_names key:
[root@overcloud-novacompute-0 puppet]# hiera -c /etc/puppet/hiera.yaml service_names |grep '\bpacemaker\b'
[root@overcloud-novacompute-0 puppet]# echo $?
1
[root@overcloud-novacompute-0 puppet]# hiera -c /etc/puppet/hiera.yaml service_names |grep '\bpacemaker_remote\b'
"pacemaker_remote"]
[root@overcloud-novacompute-0 puppet]# echo $?
0
NB: cherry-pick was not 100% clean due to unrelated lines being cleaned
up in master.
Change-Id: I54c5756ba6dea791aef89a79bc0b538ba02ae48a
Closes-Bug: #1688214
(cherry picked from commit 2244290424)
#!/bin/bash

# A heat-config-script which runs yum update during a stack-update.
# Inputs:
#   deploy_action - yum will only be run if this is UPDATE
#   update_identifier - yum will only run for previously unused values of update_identifier
#   command - yum sub-command to run, defaults to "update"
#   command_arguments - yum command arguments, defaults to ""

echo "Started yum_update.sh on server $deploy_server_id at `date`"
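# Default the update_managed_packages output to "false"; the non-pacemaker
# branch below overwrites it with "true" when package updates are handed
# over to config management tooling.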
echo -n "false" > $heat_outputs_path.update_managed_packages

if [ -f /.dockerenv ]; then
    echo "Not running due to running inside a container"
    exit 0
fi

if [[ -z "$update_identifier" ]]; then
    echo "Not running due to unset update_identifier"
    exit 0
fi

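# Guard against re-running: each update_identifier is recorded as a file
# under /var/lib/overcloud-yum-update, and the script exits early below if
# the marker for this identifier already exists.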
timestamp_dir=/var/lib/overcloud-yum-update
mkdir -p $timestamp_dir

# sanitise to remove unusual characters
update_identifier=${update_identifier//[^a-zA-Z0-9-_]/}

# seconds to wait for this node to rejoin the cluster after update
cluster_start_timeout=600
galera_sync_timeout=1800
cluster_settle_timeout=1800

timestamp_file="$timestamp_dir/$update_identifier"
if [[ -a "$timestamp_file" ]]; then
    echo "Not running for already-run timestamp \"$update_identifier\""
    exit 0
fi
touch "$timestamp_file"

pacemaker_status=""
# We include word boundaries in order to not match pacemaker_remote
if hiera -c /etc/puppet/hiera.yaml service_names | grep -q '\bpacemaker\b'; then
    pacemaker_status=$(systemctl is-active pacemaker)
fi

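# NOTE: fixup_wrong_ipv6_vip (called below) is not defined in this file; it
# is expected to be provided by the template layer that assembles this script
# (assumption: from pacemaker_common_functions.sh).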
# (NB: when backporting this s/pacemaker_short_bootstrap_node_name/bootstrap_nodeid)
# This runs before the yum_update so we are guaranteed to run it even in the absence
# of packages to update (the check for -z "$update_identifier" guarantees that this
# is run only on overcloud stack update -i)
if [[ "$pacemaker_status" == "active" && \
   "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name)" == "$(facter hostname)" ]] ; then
    # OCF scripts don't cope with -eu
    echo "Verifying if we need to fix up any IPv6 VIPs"
    set +eu
    fixup_wrong_ipv6_vip
    ret=$?
    set -eu
    if [ $ret -ne 0 ]; then
        echo "Fixing up IPv6 VIPs failed. Stopping here. (See https://bugs.launchpad.net/tripleo/+bug/1686357 for more info)"
        exit 1
    fi
fi

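# Heat may leave command_arguments unset; default it to an empty string.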
command_arguments=${command_arguments:-}

# yum check-update exits 100 if updates are available
set +e
check_update=$(yum check-update 2>&1)
check_update_exit=$?
set -e

if [[ "$check_update_exit" == "1" ]]; then
|
|
echo "Failed to check for package updates"
|
|
echo "$check_update"
|
|
exit 1
|
|
elif [[ "$check_update_exit" != "100" ]]; then
|
|
echo "No packages require updating"
|
|
exit 0
|
|
fi
|
|
|
|
# TODO: FIXME: remove this in Pike.
# Hack around mod_ssl update and puppet https://bugs.launchpad.net/tripleo/+bug/1682448
touch /etc/httpd/conf.d/ssl.conf

# Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455
# and https://bugs.launchpad.net/tripleo/+bug/1634851
if [[ "$pacemaker_status" == "active" && \
   "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]] ; then
    if pcs resource show rabbitmq | grep -E "start.*timeout=100"; then
        pcs resource update rabbitmq op start timeout=200s
    fi
    if pcs resource show rabbitmq | grep -E "stop.*timeout=90"; then
        pcs resource update rabbitmq op stop timeout=200s
    fi
    if pcs resource show redis | grep -E "start.*timeout=120"; then
        pcs resource update redis op start timeout=200s
    fi
    if pcs resource show redis | grep -E "stop.*timeout=120"; then
        pcs resource update redis op stop timeout=200s
    fi
fi

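# NOTE: special_case_ovs_upgrade_if_needed is likewise provided externally
# (assumption: also from pacemaker_common_functions.sh); it works around
# openvswitch packaging problems during the update.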
# special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714
special_case_ovs_upgrade_if_needed

if [[ "$pacemaker_status" == "active" ]] ; then
|
|
echo "Pacemaker running, stopping cluster node and doing full package update"
|
|
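    # Extract the configured node count from "pcs status xml", e.g.
    # <nodes_configured number="3" .../> yields "3".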
    node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
    if [[ "$node_count" == "1" ]] ; then
        echo "Active node count is 1, stopping node with --force"
        pcs cluster stop --force
    else
        pcs cluster stop
    fi
else
    echo "Upgrading openstack-puppet-modules and its dependencies"
    yum -q -y update openstack-puppet-modules
    yum deplist openstack-puppet-modules | awk '/dependency/{print $2}' | xargs yum -q -y update
    echo "Upgrading other packages is handled by config management tooling"
    echo -n "true" > $heat_outputs_path.update_managed_packages
    exit 0
fi

command=${command:-update}
full_command="yum -q -y $command $command_arguments"
echo "Running: $full_command"

result=$($full_command)
return_code=$?
echo "$result"
echo "yum return code: $return_code"

if [[ "$pacemaker_status" == "active" ]] ; then
|
|
echo "Starting cluster node"
|
|
pcs cluster start
|
|
|
|
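    # Poll "pcs status" until this node appears in the Online list, giving
    # up after $cluster_start_timeout seconds.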
    hostname=$(hostname -s)
    tstart=$(date +%s)
    while [[ "$(pcs status | grep "^Online" | grep -F -o $hostname)" == "" ]]; do
        sleep 5
        tnow=$(date +%s)
        if (( tnow-tstart > cluster_start_timeout )) ; then
            echo "ERROR $hostname failed to join cluster in $cluster_start_timeout seconds"
            pcs status
            exit 1
        fi
    done

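    # Determine whether this node is listed in the galera resource's
    # wsrep_cluster_address; only then wait for galera to sync below.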
    RETVAL=$( pcs resource show galera-master | grep wsrep_cluster_address | grep -q `crm_node -n` ; echo $? )

    if [[ $RETVAL -eq 0 && -e /etc/sysconfig/clustercheck ]]; then
        tstart=$(date +%s)
        while ! clustercheck; do
            sleep 5
            tnow=$(date +%s)
            if (( tnow-tstart > galera_sync_timeout )) ; then
                echo "ERROR galera sync timed out"
                exit 1
            fi
        done
    fi

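    # crm_resource --wait blocks until the cluster has no pending actions;
    # cap it with timeout(1) so a stuck cluster cannot hang the update.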
echo "Waiting for pacemaker cluster to settle"
|
|
if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
|
|
echo "ERROR timed out while waiting for the cluster to settle"
|
|
exit 1
|
|
fi
|
|
|
|
pcs status
|
|
fi
|
|
|
|
|
|
echo "Finished yum_update.sh on server $deploy_server_id at `date` with return code: $return_code"
|
|
|
|
exit $return_code
|