Merge "Remove unused minor update code"

Zuul 2018-03-19 12:34:21 +00:00 committed by Gerrit Code Review
commit 3eb0c62e47
6 changed files with 0 additions and 270 deletions


@@ -1,165 +0,0 @@
#!/bin/bash
# A heat-config-script which runs yum update during a stack-update.
# Inputs:
# deploy_action - yum will only be run if this is UPDATE
# update_identifier - yum will only run for previously unused values of update_identifier
# command - yum sub-command to run, defaults to "update"
# command_arguments - yum command arguments, defaults to ""
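#
# Illustrative only (not part of the original script): under the heat-config
# "script" hook these inputs arrive as exported environment variables, so a
# hand run would look roughly like:
#   deploy_action=UPDATE update_identifier=$(uuidgen) command=update \
#       command_arguments='' ./yum_update.sh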
echo "Started yum_update.sh on server $deploy_server_id at `date`"
echo -n "false" > $heat_outputs_path.update_managed_packages
if [ -f /.dockerenv ]; then
echo "Not running due to running inside a container"
exit 0
fi
if [[ -z "$update_identifier" ]]; then
echo "Not running due to unset update_identifier"
exit 0
fi
timestamp_dir=/var/lib/overcloud-yum-update
mkdir -p $timestamp_dir
# sanitise to remove unusual characters
update_identifier=${update_identifier//[^a-zA-Z0-9-_]/}
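# For example (illustrative), the expansion above keeps only alphanumerics,
# "-" and "_":
#   update_identifier='update 2018-03-19!'   ->   update2018-03-19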
# seconds to wait for this node to rejoin the cluster after update
cluster_start_timeout=600
galera_sync_timeout=1800
cluster_settle_timeout=1800
timestamp_file="$timestamp_dir/$update_identifier"
if [[ -a "$timestamp_file" ]]; then
echo "Not running for already-run timestamp \"$update_identifier\""
exit 0
fi
touch "$timestamp_file"
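# Illustrative: the touch above makes the script idempotent per identifier;
# a second run with the same value exits at the guard before any yum call:
#   update_identifier=abc ./yum_update.sh   # performs the update
#   update_identifier=abc ./yum_update.sh   # "Not running for already-run timestamp"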
pacemaker_status=""
# We include word boundaries in order to not match pacemaker_remote
if hiera -c /etc/puppet/hiera.yaml service_names | grep -q '\bpacemaker\b'; then
    pacemaker_status=$(systemctl is-active pacemaker)
fi
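# Illustrative: service_names is a list such as ["haproxy", "pacemaker", ...];
# the \b word boundaries keep "pacemaker_remote" from matching, since "_" is
# itself a word character:
#   echo pacemaker_remote | grep -q '\bpacemaker\b'   # exit 1, no match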
# (NB: when backporting this, s/pacemaker_short_bootstrap_node_name/bootstrap_nodeid/)
# This runs before the yum update, so it is guaranteed to run even when there
# are no packages to update (the -z "$update_identifier" check above ensures
# it is only run on overcloud stack update -i)
if [[ "$pacemaker_status" == "active" && \
"$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name | tr '[:upper:]' '[:lower:]')" == "$(facter hostname | tr '[:upper:]' '[:lower:]')" ]] ; then \
# OCF scripts don't cope with -eu
echo "Verifying if we need to fix up any IPv6 VIPs"
set +eu
fixup_wrong_ipv6_vip
ret=$?
set -eu
if [ $ret -ne 0 ]; then
echo "Fixing up IPv6 VIPs failed. Stopping here. (See https://bugs.launchpad.net/tripleo/+bug/1686357 for more info)"
exit 1
fi
fi
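# Note (not in the original): check_for_yum_lock, fixup_wrong_ipv6_vip,
# special_case_ovs_upgrade_if_needed and yum_pre_update are not defined in
# this file; yum_update.yaml prepends pacemaker_common_functions.sh via
# list_join, and that file is expected to provide them.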
command_arguments=${command_arguments:-}
# Always ensure yum has full cache
check_for_yum_lock
yum makecache || echo "Yum makecache failed. This can cause failure later on."
# yum check-update exits 100 if updates are available
check_for_yum_lock
set +e
check_update=$(yum check-update 2>&1)
check_update_exit=$?
set -e
if [[ "$check_update_exit" == "1" ]]; then
echo "Failed to check for package updates"
echo "$check_update"
exit 1
elif [[ "$check_update_exit" != "100" ]]; then
echo "No packages require updating"
exit 0
fi
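# yum check-update exit codes as handled above:
#   1   - error while checking      -> script exits 1
#   100 - updates are available     -> fall through to the update
#   0   - nothing to update         -> script exits 0 ("No packages require updating")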
# special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714
special_case_ovs_upgrade_if_needed
# Resolve any RPM dependency issues before attempting the update
check_for_yum_lock
yum_pre_update
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Pacemaker running, stopping cluster node and doing full package update"
node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
if [[ "$node_count" == "1" ]] ; then
echo "Active node count is 1, stopping node with --force"
pcs cluster stop --force
else
pcs cluster stop
fi
else
echo "Upgrading Puppet modules and dependencies"
check_for_yum_lock
yum -q -y update puppet-tripleo
yum deplist puppet-tripleo | awk '/dependency/{print $2}' | xargs yum -q -y update
echo "Upgrading other packages is handled by config management tooling"
echo -n "true" > $heat_outputs_path.update_managed_packages
exit 0
fi
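# Illustrative: in the non-pacemaker branch above, "yum deplist puppet-tripleo"
# emits lines such as "  dependency: puppet", so awk '/dependency/{print $2}'
# yields the bare dependency names that xargs feeds back into yum update.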
command=${command:-update}
full_command="yum -q -y $command $command_arguments"
echo "Running: $full_command"
check_for_yum_lock
result=$($full_command)
return_code=$?
echo "$result"
echo "yum return code: $return_code"
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Starting cluster node"
pcs cluster start
hostname=$(hostname -s)
tstart=$(date +%s)
while [[ "$(pcs status | grep "^Online" | grep -F -o $hostname)" == "" ]]; do
sleep 5
tnow=$(date +%s)
if (( tnow-tstart > cluster_start_timeout )) ; then
echo "ERROR $hostname failed to join cluster in $cluster_start_timeout seconds"
pcs status
exit 1
fi
done
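    # Illustrative: "pcs status" prints a line like
    #   Online: [ overcloud-controller-0 overcloud-controller-1 ]
    # so the loop above polls until this node's short hostname appears in it.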
    RETVAL=$( pcs resource show galera-master | grep wsrep_cluster_address | grep -q "$(crm_node -n)" ; echo $? )
    if [[ $RETVAL -eq 0 && -e /etc/sysconfig/clustercheck ]]; then
        tstart=$(date +%s)
        while ! clustercheck; do
            sleep 5
            tnow=$(date +%s)
            if (( tnow-tstart > galera_sync_timeout )) ; then
                echo "ERROR galera sync timed out"
                exit 1
            fi
        done
    fi

    echo "Waiting for pacemaker cluster to settle"
    if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
        echo "ERROR timed out while waiting for the cluster to settle"
        exit 1
    fi
    pcs status
fi
echo "Finished yum_update.sh on server $deploy_server_id at `date` with return code: $return_code"
exit $return_code


@@ -1,34 +0,0 @@
heat_template_version: queens

description: >
  Software-config for performing package updates using yum

resources:

  config:
    type: OS::Heat::SoftwareConfig
    properties:
      group: script
      config:
        list_join:
        - ''
        - - get_file: pacemaker_common_functions.sh
          - get_file: yum_update.sh
      inputs:
      - name: update_identifier
        description: yum will only run for previously unused values of update_identifier
        default: ''
      - name: command
        description: yum sub-command to run, defaults to "update"
        default: update
      - name: command_arguments
        description: yum command arguments, defaults to ""
        default: ''
      outputs:
      - name: update_managed_packages
        description: boolean value indicating whether to upgrade managed packages

outputs:
  OS::stack_id:
    value: {get_resource: config}


@@ -1,29 +0,0 @@
heat_template_version: queens

description: 'No-op yum update task'

resources:

  config:
    type: OS::Heat::SoftwareConfig
    properties:
      group: script
      config: |
        #!/bin/bash
        echo -n "false" > $heat_outputs_path.update_managed_packages
      inputs:
      - name: update_identifier
        description: yum will only run for previously unused values of update_identifier
        default: ''
      - name: command
        description: yum sub-command to run, defaults to "update"
        default: update
      - name: command_arguments
        description: yum command arguments, defaults to ""
        default: ''
      outputs:
      - name: update_managed_packages
        description: boolean value indicating whether to upgrade managed packages

outputs:
  OS::stack_id:
    value: {get_resource: config}


@@ -10,10 +10,6 @@ resource_registry:
  OS::TripleO::DefaultPasswords: default_passwords.yaml
  OS::TripleO::RandomString: OS::Heat::RandomString
  # Tasks (for internal TripleO usage)
  OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
  OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
{% for role in roles %}
  OS::TripleO::{{role.name}}::PreNetworkConfig: OS::Heat::None
  OS::TripleO::{{role.name}}PostDeploySteps: common/post.yaml


@@ -835,27 +835,11 @@ resources:
          data: {get_attr: [{{primary_role_name}}, {{network.name_lower}}_ip_address]}
{%- endfor %}

  UpdateWorkflow:
    type: OS::TripleO::Tasks::UpdateWorkflow
    depends_on:
{% for role in roles %}
      - {{role.name}}AllNodesDeployment
{% endfor %}
    properties:
      servers:
{% for role in roles %}
        {{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{% endfor %}
      input_values:
        deploy_identifier: {get_param: DeployIdentifier}
        update_identifier: {get_param: UpdateIdentifier}

  # Optional ExtraConfig for all nodes - all roles are passed in here, but
  # the nested template may configure each role differently (or not at all)
  AllNodesExtraConfig:
    type: OS::TripleO::AllNodesExtraConfig
    depends_on:
      - UpdateWorkflow
{% for role in roles %}
      - {{role.name}}AllNodesValidationDeployment
{% endfor %}


@@ -499,8 +499,6 @@ resources:
      name: {{server_resource_name}}Deployment
      config: {get_resource: {{server_resource_name}}Config}
      server: {get_resource: {{server_resource_name}}}
      input_values:
        enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
      actions:
        if:
          - server_not_blacklisted
@@ -548,7 +546,6 @@ resources:
            - {get_param: {{server_resource_name}}ExtraConfig}
        extraconfig: {get_param: ExtraConfig}
        {{role.name.lower()}}:
          tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
          tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: LoggingSources}
          tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: LoggingGroups}
          tripleo::clouddomain: {get_param: CloudDomain}
@@ -602,25 +599,6 @@ resources:
    properties:
      server: {get_resource: {{server_resource_name}}}

  UpdateConfig:
    type: OS::TripleO::Tasks::PackageUpdate

  UpdateDeployment:
    type: OS::Heat::SoftwareDeployment
    depends_on: NetworkDeployment
    properties:
      name: UpdateDeployment
      config: {get_resource: UpdateConfig}
      server: {get_resource: {{server_resource_name}}}
      input_values:
        update_identifier:
          get_param: UpdateIdentifier
      actions:
        if:
          - server_not_blacklisted
          - ['CREATE', 'UPDATE']
          - []

  DeploymentActions:
    type: OS::Heat::Value
    properties: