Merge "Add a function to upgrade from full HA to NG HA"

Jenkins 2016-09-19 17:23:08 +00:00 committed by Gerrit Code Review
commit 5e3ad982a1
3 changed files with 137 additions and 16 deletions

View File

@@ -18,6 +18,22 @@ check_disk_for_mysql_dump
STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
pcs property set stonith-enabled=false
# Migrate to HA NG
if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
migrate_full_to_ng_ha
fi
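# (migrate_full_to_ng_ha runs on the bootstrap node only; the systemd stops
# below run on every controller)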
# After migrating the cluster to HA-NG, the services not under pacemaker's control
# are still up and running. We need to stop them explicitly; otherwise, during the
# yum upgrade, the rpm %post sections will run "systemctl try-restart <service>",
# which takes a long time because rabbit is down. With the services stopped,
# "systemctl try-restart" is a no-op.
for service in $(services_to_migrate); do
manage_systemd_service stop "${service%%-clone}"
check_resource_systemd "${service%%-clone}" stopped 600
done
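# For reference, a minimal sketch of what the two helpers used above might look
# like; illustrative only (the real implementations are provided by the shared
# upgrade functions), hence the _sketch suffix:
function manage_systemd_service_sketch {
    local action=$1 service=$2
    systemctl "$action" "$service"
}
function check_resource_systemd_sketch {
    # Poll systemd until $1 reaches state $2 ("started" or "stopped"); give up after $3 seconds
    local service=$1 state=$2 timeout=$3
    local tstart=$(date +%s)
    while (( $(date +%s) - tstart < timeout )); do
        case "$state" in
            started) systemctl is-active --quiet "$service" && return 0 ;;
            stopped) systemctl is-active --quiet "$service" || return 0 ;;
        esac
        sleep 4
    done
    echo_error "ERROR: $service did not reach state $state within $timeout seconds"
    exit 1
}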
# In case the mysql package is updated, the database on disk must be
# upgraded as well. This typically needs to happen during major
# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
@@ -36,8 +52,6 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
fi
pcs resource disable httpd
check_resource httpd stopped 1800
pcs resource disable redis
check_resource redis stopped 600
pcs resource disable rabbitmq
@@ -53,14 +67,6 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
pcs cluster stop --all
fi
stop_or_disable_service mongod
check_resource mongod stopped 600
stop_or_disable_service memcached
check_resource memcached stopped 600
# Swift isn't controlled by pacemaker
systemctl_swift stop
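# For reference, a minimal sketch of what systemctl_swift might look like;
# illustrative only (the real helper and the exact unit list live in the
# shared functions):
function systemctl_swift_sketch {
    local action=$1
    for unit in openstack-swift-proxy openstack-swift-account openstack-swift-container openstack-swift-object; do
        systemctl "$action" "$unit"
    done
}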

View File

@@ -32,8 +32,6 @@ fi
start_or_enable_service galera
check_resource galera started 600
start_or_enable_service mongod
check_resource mongod started 600
if [[ -n $(is_bootstrap_node) ]]; then
tstart=$(date +%s)
@@ -59,14 +57,18 @@ if [[ -n $(is_bootstrap_node) ]]; then
# sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
fi
start_or_enable_service memcached
check_resource memcached started 600
start_or_enable_service rabbitmq
check_resource rabbitmq started 600
start_or_enable_service redis
check_resource redis started 600
start_or_enable_service httpd
check_resource httpd started 1800
# Swift isn't controlled by pacemaker
systemctl_swift start
# We need to start the systemd services we explicitly stopped in step _1.sh
# FIXME: Should we let puppet do the service enabling during the convergence step,
# or should we add it here?
for service in $(services_to_migrate); do
manage_systemd_service start "${service%%-clone}"
check_resource_systemd "${service%%-clone}" started 600
done
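# For reference, a minimal sketch of the pacemaker-side check_resource used
# above; illustrative only (the real helper also handles clones, masters and
# bootstrap-node logic):
function check_resource_sketch {
    local resource=$1 state=$2 timeout=$3
    local tstart=$(date +%s)
    while (( $(date +%s) - tstart < timeout )); do
        if pcs status | grep -w "$resource" | grep -qi "$state"; then
            return 0
        fi
        sleep 4
    done
    echo_error "ERROR: $resource did not reach state $state within $timeout seconds"
    exit 1
}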

View File

@@ -56,3 +56,116 @@ function is_mysql_upgrade_needed {
fi
echo "1"
}
# This function returns the list of services to be migrated away from pacemaker
# and over to systemd. The list lives in a separate function because it is needed
# in three different places: major_upgrade_controller_pacemaker_{1,2} and the
# function that migrates the cluster from full HA to HA NG
function services_to_migrate {
# The following PCMK resources are the ones we are going to delete
PCMK_RESOURCE_TODELETE="
httpd-clone
memcached-clone
mongod-clone
neutron-dhcp-agent-clone
neutron-l3-agent-clone
neutron-metadata-agent-clone
neutron-netns-cleanup-clone
neutron-openvswitch-agent-clone
neutron-ovs-cleanup-clone
neutron-server-clone
openstack-aodh-evaluator-clone
openstack-aodh-listener-clone
openstack-aodh-notifier-clone
openstack-ceilometer-api-clone
openstack-ceilometer-central-clone
openstack-ceilometer-collector-clone
openstack-ceilometer-notification-clone
openstack-cinder-api-clone
openstack-cinder-scheduler-clone
openstack-glance-api-clone
openstack-glance-registry-clone
openstack-gnocchi-metricd-clone
openstack-gnocchi-statsd-clone
openstack-heat-api-cfn-clone
openstack-heat-api-clone
openstack-heat-api-cloudwatch-clone
openstack-heat-engine-clone
openstack-nova-api-clone
openstack-nova-conductor-clone
openstack-nova-consoleauth-clone
openstack-nova-novncproxy-clone
openstack-nova-scheduler-clone
openstack-sahara-api-clone
openstack-sahara-engine-clone
"
echo $PCMK_RESOURCE_TODELETE
}
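# Callers strip pacemaker's "-clone" suffix with bash parameter expansion to
# derive the systemd unit name, e.g. with service="openstack-nova-api-clone":
#   echo "${service%%-clone}"    # prints "openstack-nova-api"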
# This function migrates a Mitaka system, where all the resources are managed
# via pacemaker, to a Newton setup, where only a few services are managed by pacemaker.
# At a high level it operates as follows:
# 1. Set the cluster in maintenance-mode so no start/stop actions actually take place
# during the conversion
# 2. Remove all the colocation constraints and then the ordering constraints, except the
# ones related to haproxy/VIPs, which exist in Newton as well
# 3. Remove all the resources that won't be managed by pacemaker in Newton. Note that they
# will show up as ORPHANED, but they will keep running normally via systemd. They will be
# enabled to start at boot by puppet during the converge step
# 4. Take the cluster out of maintenance-mode and do a resource cleanup
function migrate_full_to_ng_ha {
if [[ -n $(pcmk_running) ]]; then
pcs property set maintenance-mode=true
# We are making sure here that the property has propagated everywhere
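# crm_resource --wait blocks until no further cluster actions are pending;
# timeout sends it SIGTERM after 300 seconds and (-k) SIGKILL 10 seconds later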
if ! timeout -k 10 300 crm_resource --wait; then
echo_error "ERROR: cluster remained unstable after setting maintenance-mode for more than 300 seconds, exiting."
exit 1
fi
# First we go through all the colocation constraints (except the haproxy/ip ones,
# which we want to keep) and remove them; a worked example of this extraction
# follows after this function
COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: | cut -f1 -d\))
for constraint in $COL_CONSTRAINTS; do
log_debug "Deleting colocation constraint $constraint from CIB"
pcs constraint remove "$constraint"
done
# Now we kill all the ordering constraints (except the haproxy/ip ones)
ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: | cut -f1 -d\))
for constraint in $ORD_CONSTRAINTS; do
log_debug "Deleting ordering constraint $constraint from CIB"
pcs constraint remove "$constraint"
done
# At this stage there are no constraints whatsoever except the haproxy/ip ones,
# which we want to keep. We now delete each resource that will move to systemd.
# Note that the corresponding systemd service will stay running, which means that
# later, during the "yum update", things will be a bit slower because each
# "systemctl try-restart <service>" is no longer a no-op: the service is up and
# running, so it will be restarted while rabbitmq is down.
PCS_STATUS_OUTPUT="$(pcs status)"
for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
if echo "$PCS_STATUS_OUTPUT" | grep -q "$resource"; then
log_debug "Deleting $resource from the CIB"
# We need to add --force because the cluster is in maintenance mode and the resource
# is unmanaged. The if serves to make this idempotent
pcs resource delete --force "$resource"
else
log_debug "Service $service not found as a pacemaker resource, not trying to delete."
fi
done
# At this stage all the pacemaker resources have been removed from the CIB. Once we
# take the cluster out of maintenance-mode, the corresponding systemd services will
# keep on running. They will be enabled to start at boot via the puppet converge
# step later on
pcs property set maintenance-mode=false
# We need a pcs resource cleanup plus a crm_resource --wait here to make sure the
# cluster is in a clean state before we stop everything, upgrade, and restart everything
pcs resource cleanup
# We are making sure here that the cluster is stable before proceeding
if ! timeout -k 10 600 crm_resource --wait; then
echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
exit 1
fi
fi
}
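# For reference, a quick self-contained check of the colocation-constraint
# extraction used in migrate_full_to_ng_ha, run against illustrative
# "pcs config show" output (sample text only, not captured from a real cluster):
sample_config='Colocation Constraints:
  openstack-nova-api-clone with openstack-core-clone (score:INFINITY) (id:colocation-nova-api-INFINITY)
  ip-192.0.2.10 with haproxy-clone (score:INFINITY) (id:colocation-ip-haproxy-INFINITY)'
echo "$sample_config" | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: | cut -f1 -d\)
# Prints "colocation-nova-api-INFINITY" only; the haproxy/VIP constraint id is
# excluded because that constraint must survive the migration.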