Weed out unused bash code

Change-Id: I5910c2f7bad7d1f0dfa1ec6c8b760892e7003109
Yuriy Taraday 2015-09-02 12:54:30 +03:00
parent 925b76c61a
commit 03b49db06a
12 changed files with 1 addition and 919 deletions

View File

@@ -1,85 +0,0 @@
#!/usr/bin/python
import sys
import yaml


def copy_ifaces(src, dst):
    def pull(ifaces):
        for iface in ifaces:
            yield (iface['name'],
                   iface['assigned_networks'])

    def push(ifaces, assignments, nets):
        for iface in ifaces:
            networks = assignments.get(iface['name'], [])
            networks = [{'id': nets[net['name']],
                         'name': net['name']} for net in networks]
            yield dict(iface,
                       assigned_networks=networks,
                       )

    nets = {}
    for iface in dst:
        nets.update(dict([(net['name'], net['id'])
                          for net in iface['assigned_networks']]))

    assignments = pull(src)
    ifaces = push(dst, dict(assignments), nets)
    yaml.dump(list(ifaces), stream=sys.stdout, default_flow_style=False)


def by_extra(disk):
    return ''.join(sorted(disk['extra']))


def by_name(disk):
    return disk['name']


KEY_FUNCS = {
    'by_extra': by_extra,
    'by_name': by_name,
}


def copy_disks(src, dst, method):
    key_func = KEY_FUNCS[method]

    def pull(disks):
        for disk in disks:
            yield (key_func(disk),
                   disk['volumes'])

    def push(disks1, disks2):
        def to_dict(attrs):
            return dict((key_func(attr), attr) for attr in attrs)

        dict_disks1 = to_dict(disks1)
        for extra, volumes in disks2:
            dict_disks1[extra].update(volumes=volumes)
            yield dict_disks1[extra]

    fixture_disks = pull(src)
    disks = push(dst, fixture_disks)
    yaml.dump(list(disks), stream=sys.stdout, default_flow_style=False)


FUNCTIONS = {
    'disks': copy_disks,
    'interfaces': copy_ifaces
}


def main():
    function = FUNCTIONS[sys.argv[1]]
    settings = yaml.load(open(sys.argv[2]))
    fixture = yaml.load(open(sys.argv[3]))
    function(fixture, settings, *sys.argv[4:])


if __name__ == '__main__':
    main()
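The removed helper above is driven entirely by positional arguments: an action (disks or interfaces), the node's current settings YAML, the fixture YAML, and for disks an optional matching method. A hedged sketch of how apply_disk_settings and apply_network_settings below invoked it; the node ID and paths are illustrative:

    # Merge the disks/interfaces fixtures into node 7's downloaded settings.
    ./copy-node-settings disks node_7/disks.yaml disks.fixture.yaml by_name \
        > /tmp/disks_7.yaml
    ./copy-node-settings interfaces node_7/interfaces.yaml interfaces.fixture.yaml \
        > /tmp/interfaces_7.yaml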

View File

@@ -13,9 +13,7 @@ export PATCH_DIR="$CWD/patches"
export SERVICE_TENANT_ID=""
. ${LIBPATH}/utils.sh
. ${LIBPATH}/nailgundb.sh
. ${LIBPATH}/functions.sh
. ${LIBPATH}/db.sh
. ${LIBPATH}/maintenance.sh
. ${LIBPATH}/patch.sh
. ${LIBPATH}/ceph.sh

View File

@@ -1,25 +0,0 @@
#!/bin/sh -e
PATCH=${1-"../patches/neutron-upgrade.patch"}
if [ ! -f "$PATCH" ]; then
echo "Usage $0 neutron-upgrade.patch > neutron-upgrade-rendered.patch" >> /dev/stderr
exit 1
fi
extract_vars() {
sed -re '/^\+.*%.*/ s/.*%([^%]+)%.*/\L\1/;tx;d;:x' $PATCH
}
convert_vars_to_regex() {
tr "\n" " "| sed -re 's,^,^(,;s,.$,),;s, ,|,g'
}
generate_template_regex() {
find /etc/neutron -type f | xargs -I{} egrep "`extract_vars | convert_vars_to_regex`" {} | awk -F= '{key = gensub(" ", "", "g", $1); printf("s|%%%s%%|%s|g;", toupper(key), $2)}'
}
sed -r "`generate_template_regex`" ${PATCH}
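To make the pipeline above concrete, here is a minimal sketch of a single substitution, assuming a GNU awk (gensub) environment; the patch line and config value are purely illustrative. A hypothetical hunk line "+admin_tenant_name = %ADMIN_TENANT_NAME%" makes extract_vars emit "admin_tenant_name"; generate_template_regex then greps /etc/neutron for that key and turns the matching line into a sed program:

    echo "admin_tenant_name = services" \
        | awk -F= '{key = gensub(" ", "", "g", $1);
                    printf("s|%%%s%%|%s|g;", toupper(key), $2)}'
    # -> s|%ADMIN_TENANT_NAME%| services|g;   (applied to the patch by the final sed)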

View File

@@ -1,18 +0,0 @@
#!/usr/bin/python
import sys
import yaml


def main():
    settings = yaml.load(open(sys.argv[1]))
    for disk in settings:
        for vol in disk["volumes"]:
            if vol["name"] == "ceph":
                vol.update({'keep': True})
    yaml.dump(list(settings), stream=sys.stdout, default_flow_style=False)


if __name__ == '__main__':
    main()
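A sketch of how this filter was consumed (it mirrors keep_ceph_partition further down; the node ID is illustrative):

    # Flag the ceph volumes on node 42's disks as preserved, then re-upload the file.
    ./keep-ceph-partition node_42/disks.yaml > /tmp/disks-ceph-partition.yaml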

View File

@@ -50,9 +50,7 @@ case $1 in
upgrade_ceph $2 $3
;;
upgrade-node)
[ -z "$2" ] && die "$(usage)"
[ -z "$3" ] && die "$(usage)"
shift && upgrade_node "$@"
pycmd "$@"
;;
upgrade-nova-compute)
# TODO(ogelbukh) delete as obsoleted by upgrade-cics command.

View File

@@ -165,40 +165,3 @@ EOF
done
}
set_osd_noout() {
[ -z "$1" ] && die "No 6.0 env ID provided, exiting"
ssh root@$(list_nodes $1 'controller' | head -1) ceph osd set noout
}
unset_osd_noout() {
[ -z "$1" ] && die "No 6.0 env ID provided, exiting"
ssh root@$(list_nodes $1 'controller' | head -1) ceph osd unset noout
}
check_ceph_cluster() {
[ -z "$1" ] && die "No node ID provided, exiting"
[ -z "$(ssh root@node-$1 ceph health | grep HEALTH_OK)" ] && \
die "Ceph cluster is unhealthy, exiting"
}
patch_osd_node() {
[ -z "$1" ] && die "No node ID provided, exiting"
cd ${PATCH_DIR}/pman/
./update_node.sh node-$1
cd $OLDPWD
}
prepare_osd_node_upgrade() {
[ -z "$1" ] && die "No node ID provided, exiting"
patch_osd_node "$@"
keep_ceph_partition "$@"
}
restart_mon_init() {
[ -z "$1" ] && die "No env ID provided, exiting"
for n in $(list_nodes $1 controller); do
ssh root@$(get_host_ip_by_node_id ${n#node-}) "stop ceph-mon id=${n};
/etc/init.d/ceph start mon" ||
die "Cannot restart Ceph MON on node ${n}, exiting"
done
}
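Taken together, these helpers backed the ceph-osd branch of the node upgrade flow (see upgrade_node_preprovision and upgrade_node_postdeploy below); a rough outline with illustrative env and node IDs:

    check_ceph_cluster 14        # bail out unless Ceph reports HEALTH_OK
    prepare_osd_node_upgrade 14  # patch node-14 and keep its Ceph partition
    set_osd_noout 2              # 6.0 env: freeze rebalancing while the node moves
    # ... node-14 is reprovisioned and redeployed into the seed env ...
    unset_osd_noout 2            # allow rebalancing again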

View File

@@ -1,74 +0,0 @@
#!/bin/bash -xe
disable_wsrep() {
[ -z "$1" ] && die "No node ID provided, exiting, exiting"
ssh root@$(get_host_ip_by_node_id $1) "echo \"SET GLOBAL wsrep_on='off';\" | mysql"
}
enable_wsrep() {
[ -z "$1" ] && die "No node ID provided, exiting"
ssh root@$(get_host_ip_by_node_id $1) "echo \"SET GLOBAL wsrep_on='ON';\" | mysql"
}
xtrabackup_install() {
[ -z "$1" ] && die "No node ID provided, exiting"
ssh root@$(get_host_ip_by_node_id $1) "apt-get -y install percona-xtrabackup"
}
xtrabackup_stream_from_node() {
[ -z "$1" ] && die "No backup source node ID provided, exiting"
ssh root@$(get_host_ip_by_node_id $1) "xtrabackup --backup --stream=tar ./ | gzip " \
| cat - > $FUEL_CACHE/dbs.original.tar.gz
}
xtrabackup_from_env() {
[ -z "$1" ] && die "No env ID provided, exiting"
local node=$(list_nodes $1 controller | head -1)
node=${node#node-}
xtrabackup_install $node
disable_wsrep $node
xtrabackup_stream_from_node $node
enable_wsrep $node
}
xtrabackup_restore_to_env() {
[ -z "$1" ] && die "No env ID provided, exiting"
local cic
local cics="$(list_nodes $1 controller)"
for cic in $(echo "$cics");
do
ssh root@$(get_host_ip_by_node_id ${cic#node-}) \
"mv /var/lib/mysql/grastate.dat /var/lib/mysql/grastate.old"
done
local primary_cic=$(echo "$cics" | head -1)
scp $FUEL_CACHE/dbs.original.tar.gz \
root@$(get_host_ip_by_node_id ${primary_cic#node-}):/var/lib/mysql
ssh root@$(get_host_ip_by_node_id ${primary_cic#node-}) \
"cd /var/lib/mysql;
tar -zxvf dbs.original.tar.gz;
chown -R mysql:mysql /var/lib/mysql;
export OCF_RESOURCE_INSTANCE=p_mysql;
export OCF_ROOT=/usr/lib/ocf;
export OCF_RESKEY_socket=/var/run/mysqld/mysqld.sock;
export OCF_RESKEY_additional_parameters="\""--wsrep-new-cluster"\"";
/usr/lib/ocf/resource.d/fuel/mysql-wss start;"
for cic in $(echo "$cics" | grep -v $primary_cic);
do
ssh root@$(get_host_ip_by_node_id ${cic#node-}) \
"export OCF_RESOURCE_INSTANCE=p_mysql;
export OCF_ROOT=/usr/lib/ocf;
export OCF_RESKEY_socket=/var/run/mysqld/mysqld.sock;
/usr/lib/ocf/resource.d/fuel/mysql-wss start;"
done
db_sync ${primary_cic#node-}
}
db_sync() {
[ -z "$1" ] && die "No node ID provided, exiting"
ssh root@$(get_host_ip_by_node_id $1) "keystone-manage db_sync;
nova-manage db sync;
heat-manage db_sync;
neutron-db-manage --config-file=/etc/neutron/neutron.conf upgrade head;
glance-manage db upgrade;
cinder-manage db sync"
}
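End to end, these functions implemented a one-shot Galera state transfer between environments; a sketch, with env 1 standing in for the original 5.1 environment and env 2 for the 6.0 seed:

    xtrabackup_from_env 1        # stream a backup off a 5.1 controller into $FUEL_CACHE/dbs.original.tar.gz
    xtrabackup_restore_to_env 2  # unpack it on the primary 6.0 CIC, bootstrap Galera with
                                 # --wsrep-new-cluster, start the remaining CICs and run db_sync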

View File

@@ -14,221 +14,6 @@ pycmd() {
exit $?
}
get_service_tenant_id() {
[ -z "$1" ] && die "No node ID provided, exiting"
local env=$(get_env_by_node $1)
local filename="${FUEL_CACHE}/env-${env}-service-tenant-id"
if [ -f "$filename" ]; then
SERVICE_TENANT_ID=$(cat $filename)
else
SERVICE_TENANT_ID=$(ssh root@$(get_host_ip_by_node_id $1) ". openrc;
keystone tenant-get services \
| awk -F\| '\$2 ~ /id/{print \$3}' | tr -d \ ")
fi
[ -z "$SERVICE_TENANT_ID" ] &&
die "Cannot determine service tenant ID for node $1, exiting"
echo $SERVICE_TENANT_ID > $filename
}
get_deployment_info() {
local cmd
# Download deployment config from Fuel master for environment ENV to subdir in
# current directory.
[ -z "$1" ] && die "No environment ID provided, exiting"
[ -d "$FUEL_CACHE" ] || mkdir -p "$FUEL_CACHE"
[ -d "${FUEL_CACHE}/deployment_$1" ] && rm -r ${FUEL_CACHE}/deployment_$1
cmd=${2:-default}
fuel deployment --env $1 --$cmd --dir ${FUEL_CACHE}
}
get_deployment_tasks() {
[ -z "$1" ] && die "No environment ID provided, exiting"
[ -d "$FUEL_CACHE" ] || mkdir -p "$FUEL_CACHE"
fuel env --env $1 --deployment-task --download --dir ${FUEL_CACHE}
}
upload_deployment_info() {
# Upload deployment configuration with modifications to Fuel master for
# environment ENV.
[ -z "$1" ] && die "No environment ID provided, exiting"
[ -d "$FUEL_CACHE" ] &&
fuel deployment --env $1 --upload --dir $FUEL_CACHE
}
backup_deployment_tasks() {
[ -z "$1" ] && die "No environment ID provided, exiting"
[ -d "$FUEL_CACHE" ] &&
[ -d "${FUEL_CACHE}/cluster_$1" ] &&
cp -pR "${FUEL_CACHE}/cluster_$1" "${FUEL_CACHE}/cluster_$1.orig"
}
upload_deployment_tasks() {
[ -z "$1" ] && die "No environment ID provided, exiting"
[ -d "$FUEL_CACHE" ] &&
fuel env --env $1 --deployment-task --upload --dir $FUEL_CACHE
}
backup_deployment_info() {
[ -z "$1" ] && die "No env ID provided, exiting"
[ -d "${FUEL_CACHE}/deployment_$1" ] && {
[ -d "${FUEL_CACHE}/deployment_$1.orig" ] || mkdir "${FUEL_CACHE}/deployment_$1.orig"
cp -R ${FUEL_CACHE}/deployment_$1/*.yaml ${FUEL_CACHE}/deployment_$1.orig/
}
}
remove_patch_transformations() {
# Remove add-patch actions for br-ex, br-mgmt bridges. Required to isolate new
# controllers from original environment while physically connected to the same
# L2 segment.
[ -z "$1" ] && die "No env ID provided, exiting"
python ${HELPER_PATH}/transformations.py ${FUEL_CACHE}/deployment_$1 remove_patch_ports
}
remove_physical_transformations(){
[ -z "$1" ] && die "No env ID provided, exiting"
python ${HELPER_PATH}/transformations.py ${FUEL_CACHE}/deployment_$1 \
remove_physical_ports
}
disable_ping_checker() {
[ -z "$1" ] && die "No env ID provided, exiting"
[ -d "${FUEL_CACHE}/deployment_$1" ] || die "Deployment info directory not found, exiting"
ls ${FUEL_CACHE}/deployment_$1/** | xargs -I@ sh -c "echo 'run_ping_checker: false' >> @"
}
skip_deployment_tasks() {
[ -z "$1" ] && die "No env ID provided, exiting"
[ -d "${FUEL_CACHE}/cluster_$1" ] || die "Cluster info directory not found, exiting"
python ${HELPER_PATH}/tasks.py ${FUEL_CACHE}/cluster_$1 skip_tasks
}
prepare_seed_deployment_info() {
[ -z "$1" ] && "No seed env ID provided, exiting"
disable_ping_checker $1
remove_predefined_networks $1
reset_gateways_admin $1
skip_deployment_tasks $1
}
merge_deployment_info() {
# Merges default and current deployment info for the given environment.
[ -z "$1" ] && die "no env ID provided, exiting"
local infodir="${FUEL_CACHE}/deployment_$1"
[ -d "$infodir" ] || die "directory $infodir not found, exiting"
mv "${infodir}" "${infodir}.default"
get_deployment_info $1 download
[ -d "${infodir}" ] || mkdir ${infodir}
mv ${infodir}.default/* ${infodir}/ &&
rmdir ${infodir}.default
}
remove_predefined_networks() {
[ -z "$1" ] && die "No env ID provided, exiting"
python $HELPER_PATH/transformations.py ${FUEL_CACHE}/deployment_$1 remove_predefined_nets
}
reset_gateways_admin() {
[ -z "$1" ] && die "No env ID provided, exiting"
python ${HELPER_PATH}/transformations.py \
${FUEL_CACHE}/deployment_$1 reset_gw_admin
}
create_ovs_bridges() {
local nodes
local node
local br_name
[ -z "$1" ] && die "No env ID provided, exiting"
nodes=$(list_nodes $1 '(controller)')
for node in $nodes
do
ssh root@$node apt-get -y install openvswitch-switch
[ $? -ne 0 ] && die "Cannot install openvswitch, exiting"
for br_name in br-ex br-mgmt
do
ssh root@$node ovs-vsctl add-br $br_name
ssh root@$node ip link set dev $br_name mtu 1450
done
done
}
tunnel_from_to() {
# Configure GRE tunnels between 2 nodes. Nodes are specified by their hostnames
# (e.g. node-2). Every tunnel must have a unique key to avoid conflicting
# configurations.
local src_node
local dst_node
local br_name
local remote_ip
local gre_port
local key
[ -z "$1" ] && die "No tunnel paramters provided, exiting"
src_node=$1
[ -z "$2" ] && die "No tunnel remote parameters provided, exiting"
dst_node=$2
[ -z "$3" ] && die "No bridge name provided, exiting"
br_name=$3
key=${4:-0}
remote_ip=$(host $dst_node | grep -Eo '([0-9\.]+)$')
[ -z "$remote_ip" ] && die "Tunnel remote host $dst_node not found, exiting"
gre_port=$br_name--gre-$dst_node
ssh root@$src_node ovs-vsctl add-port $br_name $gre_port -- \
set Interface $gre_port type=gre options:remote_ip=$remote_ip \
options:key=$key
}
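For illustration, a call such as tunnel_from_to node-1 node-2 br-mgmt 3 boils down to the command below, run on node-1; the remote IP is whatever host(1) resolves for node-2 (placeholder value here):

    ssh root@node-1 ovs-vsctl add-port br-mgmt br-mgmt--gre-node-2 -- \
        set Interface br-mgmt--gre-node-2 type=gre \
        options:remote_ip=192.168.0.5 options:key=3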
create_tunnels() {
# Create tunnels between nodes in the new environment to ensure isolation from
# the management and public networks of the original environment and retain connectivity
# in the 6.0 environment.
local br_name
local primary
local nodes
local node
[ -z "$1" ] && die "No env ID provided, exiting"
br_name=$2
roles_re=${3:-'controller'}
nodes=$(list_nodes $1 "$roles_re")
primary=$(echo $nodes | cut -d ' ' -f1)
for node in $nodes
do
[ "$node" == "$primary" ] || {
tunnel_from_to $primary $node $br_name $KEY
tunnel_from_to $node $primary $br_name $KEY
KEY=$(expr $KEY + 1)
}
done
}
env_action() {
# Start deployment or provisioning of all nodes in the environment, depending on
# the second argument. The first argument is the env ID; remaining arguments are node IDs.
[ -z "$1" ] && die "No 6.0 env ID provided, exiting"
local env=$1 && shift
local action=$1 && shift
local node_ids="$@"
fuel node --env $env --$action --node $node_ids
[ $? -ne 0 ] && die "Cannot start $action for env $env, exiting" 2
}
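So, for example, env_action 2 provision 4 5 expands to a single Fuel CLI call (IDs illustrative):

    fuel node --env 2 --provision --node 4 5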
check_neutron_agents() {
[ -z "$1" ] && die "No env ID provided, exiting"
local l3_nodes=$(fuel2 node list -c roles -c ip | awk -F\| '$2~/controller/{print($3)}' \
| tr -d ' ' | xargs -I{} ssh root@{} "ps -ef | grep -v \$\$ \
| grep -q neutron-l3-agent && echo \$(hostname)" 2>/dev/null)
local dhcp_nodes=$(fuel2 node list -c roles -c ip | awk -F\| '$2~/controller/{print($3)}' \
| tr -d ' ' | xargs -I{} ssh root@{} "ps -ef | grep -v \$\$ \
| grep -q neutron-dhcp-agent && echo \$(hostname)" 2>/dev/null)
for n in $l3_nodes;
do
[ "${n#node-}" == "$1" ] && exit 1
done
for n in $dhcp_nodes;
do
[ "${n#node-}" == "$1" ] && exit 1
done
}
check_deployment_status() {
# Verify operational status of environment.
[ -z "$1" ] && die "No env ID provided, exiting"
@@ -238,45 +23,6 @@ check_deployment_status() {
[ "$status" == 'new' ] || die "Environment is not operational, exiting"
}
discover_nodes_to_cics() {
[ -z "$1" ] && die "No env ID provided, exiting"
local node_ids=$(fuel node | awk -F\| '$2~/discover/{print($1)}' \
| tr -d ' ' | sed ':a;N;$!ba;s/\n/,/g')
fuel node set --env $1 --node $node_ids --role controller
}
delete_tunnel() {
# Delete tunnel between src_node and dst_node.
local src_node
local dst_node
local br_name
local gre_port
[ -z "$1" ] && die "No tunnel parameters provided, exiting"
src_node=$1
[ -z "$2" ] && die "Bridge name not specified"
br_name=$2
for gre_port in $(list_ports $src_node $br_name | grep $br_name--gre)
do
echo $gre_port \
| xargs -I{} ssh root@$src_node ovs-vsctl del-port $br_name {}
[ $? -ne 0 ] && die "Cannot delete GRE port, exiting"
done
}
remove_tunnels() {
# Delete tunnels from 6.0 CICs to replace 5.1 controllers.
local br_name
local nodes
local node
[ -z "$1" ] && die "No env ID provided, exiting"
br_name=$2
nodes=$(list_nodes $1 'controller')
for node in $nodes
do
delete_tunnel $node $br_name
done
}
list_ports() {
# On the host identified by the first argument, list ports in the bridge
# identified by the second argument.
@@ -322,216 +68,6 @@ delete_patch_ports() {
done
}
apply_disk_settings() {
[ -z "$1" ] && die "No node ID provided, exiting"
[ -f "${FUEL_CACHE}/disks.fixture.yaml" ] || die "No disks fixture provided, exiting"
local disk_file="${FUEL_CACHE}/node_$1/disks.yaml"
fuel node --node $1 --disk --download --dir $FUEL_CACHE
${BINPATH}/copy-node-settings disks $disk_file ${FUEL_CACHE}/disks.fixture.yaml by_name \
> /tmp/disks_$1.yaml
mv /tmp/disks_$1.yaml $disk_file
fuel node --node $1 --disk --upload --dir $FUEL_CACHE
}
apply_network_settings() {
[ -z "$1" ] && die "No node ID provided, exiting"
[ -f "${FUEL_CACHE}/interfaces.fixture.yaml" ] || die "No interfaces fixture provided, exiting"
local iface_file="${FUEL_CACHE}/node_$1/interfaces.yaml"
fuel node --node $1 --network --download --dir $FUEL_CACHE
${BINPATH}/copy-node-settings interfaces $iface_file \
${FUEL_CACHE}/interfaces.fixture.yaml > /tmp/interfaces_$1.yaml
mv /tmp/interfaces_$1.yaml $iface_file
fuel node --node $1 --network --upload --dir $FUEL_CACHE
}
keep_ceph_partition() {
[ -z "$1" ] && die "No node ID provided, exiting"
local disk_file="${FUEL_CACHE}/node_$1/disks.yaml"
fuel node --node $1 --disk --download --dir ${FUEL_CACHE}
${BINPATH}/keep-ceph-partition $disk_file \
> /tmp/disks-ceph-partition.yaml
mv /tmp/disks-ceph-partition.yaml $disk_file
fuel node --node $1 --disk --upload --dir ${FUEL_CACHE}
}
get_node_settings() {
[ -z "$1" ] && die "No node ID provided, exiting"
[ -d "$FUEL_CACHE" ] || mkdir -p "$FUEL_CACHE"
fuel node --node $1 --network --download --dir $FUEL_CACHE
fuel node --node $1 --disk --download --dir $FUEL_CACHE
}
prepare_fixtures_from_node() {
[ -z "$1" ] && die "No node ID provided, exiting"
get_node_settings $1
mv ${FUEL_CACHE}/node_$1/disks.yaml ${FUEL_CACHE}/disks.fixture.yaml
mv ${FUEL_CACHE}/node_$1/interfaces.yaml ${FUEL_CACHE}/interfaces.fixture.yaml
rmdir ${FUEL_CACHE}/node_$1
}
upload_node_settings() {
[ -z "$1" ] && die "No node ID provided, exiting"
[ -d "${FUEL_CACHE}/node_$1" ] || die "Local node settings not found, exiting"
fuel node --node $1 --network --upload --dir $FUEL_CACHE
fuel node --node $1 --disk --upload --dir $FUEL_CACHE
}
assign_node_to_env() {
[ -z "$1" ] && die "No node ID provided, exiting"
[ -z "$2" ] && die "No seed env ID provided, exiting"
local roles=$(fuel node --node $1 \
| awk -F\| '/^'$1'/ {gsub(" ", "", $7);print $7}')
local orig_id=$(get_env_by_node $1)
if [ "$orig_id" != "None" ]; then
fuel2 env move node $1 $2 ||
die "Cannot move node $1 to env $2, exiting"
wait_for_node $1 discover
else
die "Cannot upgrade unallocated node $1"
#fuel node --node $1 --env $2 set --role ${roles:-controller}
fi
}
prepare_compute_upgrade() {
[ -z "$1" ] && die "No 6.0 env ID provided, exiting"
[ -z "$2" ] && die "No node ID provided, exiting"
cic=$(list_nodes $1 controller | head -1)
scp ${BINPATH}/host_evacuation.sh root@$cic:/var/tmp/
ssh root@$cic "/var/tmp/host_evacuation.sh node-$2"
}
cleanup_compute_upgrade() {
[ -z "$1" ] && die "No 6.0 env ID provided, exiting"
[ -z "$2" ] && die "No node ID provided, exiting"
cic=$(list_nodes $1 controller | head -1)
ssh root@$cic "source openrc; nova service-enable node-$2 nova-compute"
}
prepare_controller_upgrade() {
[ -z "$1" ] && die "No 6.0 env and node ID provided, exiting"
[ -z "$2" ] && die "No node ID provided, exiting"
#Required for updating tenant ID in Neutron config on 6.1
get_service_tenant_id $2
}
upgrade_node_preprovision() {
[ -z "$1" ] && die "No 6.0 env and node ID provided, exiting"
[ -z "$2" ] && die "No node ID provided, exiting"
local roles=$(fuel node --node $2 \
| awk -F\| '$1~/^'$2'/ {gsub(" ", "", $7);print $7}' \
| sed -re 's%,% %')
# Pre-upgrade checks
for role in $roles; do
case $role in
ceph-osd)
check_ceph_cluster $2
;;
esac
done
# Prepare to provisioning
for role in $roles
do
case $role in
compute)
prepare_compute_upgrade "$@"
;;
ceph-osd)
prepare_osd_node_upgrade $2
set_osd_noout $1
;;
controller)
prepare_controller_upgrade "$@"
;;
*)
echo "Role $role unsupported, skipping"
;;
esac
done
assign_node_to_env $2 $1
}
upgrade_node_postprovision() {
[ -z "$1" ] && die "No 6.0 env and node ID provided, exiting"
[ -z "$2" ] && die "No node ID provided, exiting"
wait_for_node $2 "provisioned"
}
upgrade_node_predeploy() {
local isolated="" roles
if [ "$1" = "--isolated" ]; then
isolated=$1
shift
fi
[ -z "$1" ] && die "No 6.0 env and node ID provided, exiting"
[ -z "$2" ] && die "No node ID provided, exiting"
roles=$(fuel node --node $2 \
| awk -F\| '$1~/^'$2'/ {gsub(" ", "", $8);print $8}' \
| sed -re 's%,% %')
if [[ "$roles" =~ controller ]]; then
get_deployment_info $1
if [ "$isolated" ]; then
backup_deployment_info $1
remove_physical_transformations $1
fi
get_deployment_tasks $1
prepare_seed_deployment_info $1
merge_deployment_info $1
upload_deployment_info $1
upload_deployment_tasks $1
fi
}
upgrade_node_postdeploy() {
[ -z "$1" ] && die "No 6.0 env and node ID provided, exiting"
[ -z "$2" ] && die "No node ID provided, exiting"
local roles=$(fuel node --node $2 \
| awk -F\| '$1~/^'$2'/ {gsub(" ", "", $7);print $7}' \
| sed -re 's%,% %')
wait_for_node $2 "ready"
for role in $roles
do
case $role in
compute)
cleanup_compute_upgrade "$@"
;;
ceph-osd)
unset_osd_noout $1
;;
controller)
neutron_update_admin_tenant_id $1
;;
esac
done
if [ "$3" == "isolated" ]; then
restore_default_gateway $2
fi
}
upgrade_node() {
# This function takes the ID of the upgrade seed env and one or more node IDs,
# deletes each node from its original env and adds it to the seed env.
local isolated="" env n
if [ "$1" = "--isolated" ]; then
isolated=$1
shift
fi
[ -z "$1" ] && die "No 6.0 env and node ID provided, exiting"
[ -z "$2" ] && die "No node ID provided, exiting"
env=$1 && shift
for n in $@; do
upgrade_node_preprovision $env $n
done
env_action $env provision "$@"
for n in $@; do
upgrade_node_postprovision $env $n
upgrade_node_predeploy $isolated $env $n
done
env_action $env deploy "$@"
for n in $@; do
upgrade_node_postdeploy $env $n $isolated
done
}
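A sketch of a typical call, assuming env 2 is the 6.0 seed and nodes 4 and 5 are controllers being pulled out of the original environment; --isolated keeps the re-deployed controllers detached from the old environment's br-ex/br-mgmt until cutover:

    upgrade_node --isolated 2 4 5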
upgrade_cics() {
[ -z "$1" ] && die "No 5.1.1 env ID provided, exiting"
[ -z "$2" ] && die "No 6.0 env ID provided, exiting"
@@ -564,16 +100,6 @@ upgrade_ceph() {
prepare_ceph_osd_upgrade $2
}
neutron_update_admin_tenant_id() {
local tenant_id=''
[ -z "$1" ] && die "No env ID provided, exiting"
cic_node=$(list_nodes $1 controller | head -1)
list_nodes $1 controller | xargs -I{} ssh root@{} \
"sed -re 's/^(nova_admin_tenant_id )=.*/\1 = $SERVICE_TENANT_ID/' \
-i /etc/neutron/neutron.conf;
restart neutron-server"
}
cleanup_nova_services() {
[ -z "$1" ] && die "No 6.0 env ID provided, exiting"
local cic=$(list_nodes $1 controller | head -1)

View File

@@ -9,16 +9,6 @@ enable_apis() {
$PSSH_RUN "pkill haproxy"
}
stop_vip_resources() {
$PSSH_RUN "echo vip__management vip__public \
| xargs -I{} -d \ sh -c 'crm resource stop {}'"
}
start_vip_resources() {
$PSSH_RUN "echo vip__management vip__public \
| xargs -I{} -d \ sh -c 'crm resource start {}'"
}
start_corosync_services() {
$PSSH_RUN "pcs resource \
| awk '/Clone Set:/ {print \$4; getline; print \$1}' \
@@ -46,55 +36,3 @@ EOF
)
$PSSH_RUN "$command"
}
evacuate_neutron_agents() {
[ -z "$1" ] && die "No node ID provided, exiting"
[ -z "$(fuel node --node $1 | grep controller)" ] && \
die "Node $1 is not a controller, exiting"
local res
local dst_node=$(list_nodes $(get_env_by_node $1) controller \
| grep -v "node-$1" | head -1)
local src_node=$(get_host_ip_by_node_id $1)
for res in p_neutron-l3-agent p_neutron-dhcp-agent;
do
ssh root@$src_node "crm resource status $res \
| grep node-$1 && pcs resource move $res $dst_node"
done
}
mysql_maintenance_mode() {
[ -z "$1" ] && die "No env ID provided, exiting"
local cmd
case "$2" in
activate)
cmd="disable server"
;;
deactivate)
cmd="enable server"
;;
*)
die "Use 'activate/deactivate' as a second argument"
;;
esac
for node in $(list_nodes $1 controller);
do
ssh root@$(get_host_ip_by_node_id ${node#node-}) \
"echo '${cmd} mysqld/${node}' \
| socat stdio /var/lib/haproxy/stats"
done
}
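The socat line above drives HAProxy's admin socket directly; for a single controller it amounts to roughly the following (IP and node name illustrative):

    ssh root@10.20.0.4 "echo 'disable server mysqld/node-4' \
        | socat stdio /var/lib/haproxy/stats"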
cic_maintenance_mode() {
[ -z "$1" ] && die "No node ID provided, exiting"
host_ip=$(get_host_ip_by_node_id $1)
case "$2" in
activate)
ssh root@$host_ip "crm node maintenance"
disable_wsrep $1
;;
deactivate)
enable_wsrep $1
ssh root@$host_ip "crm node ready"
;;
esac
}

View File

@@ -1,90 +0,0 @@
#!/bin/bash
get_nailgun_db_pass() {
# Parse nailgun configuration to get DB password for 'nailgun' database. Return
# the password.
echo $(dockerctl shell nailgun cat /etc/nailgun/settings.yaml \
| awk 'BEGIN {out=""}
/DATABASE/ {out=$0;next}
/passwd:/ {if(out!=""){out="";print $2}}' \
| tr -d '"')
}
PG_CMD="psql -At postgresql://nailgun:$(get_nailgun_db_pass)@localhost/nailgun"
get_node_group_id() {
[ -z "$1" ] && die "No env ID provided, exiting"
echo "select id from nodegroups where cluster_id = $1" \
| $PG_CMD
}
get_nailgun_net_id() {
local vip_type
local net_id
local group_id
[ -z "$1" ] && die "No group ID provided, exiting"
[ -z "$2" ] && die "No bridge name provided, exiting"
group_id=$(get_node_group_id $1)
vip_type=$(echo $2 | sed -e 's/br-ex/public/;s/br-mgmt/management/')
net_id=$(echo "select id from network_groups where group_id = ${group_id} and
name = '$vip_type';" | $PG_CMD)
echo $net_id
}
update_vip_nailgun_db() {
# Replace Virtual IP addresses assigned to the 6.0 Seed environment in the Nailgun DB
# with addresses from the 5.1 environment.
local vip
local seed_net_id
local orig_net_id
[ -z "$1" ] && die "No 5.1 and 6.0 env IDs provided, exiting"
[ -z "$2" ] && die "No 6.0 env ID provided, exiting"
[ -z "$3" ] && die "No bridge provided, exiting"
orig_net_id=$(get_nailgun_net_id $1 $3)
seed_net_id=$(get_nailgun_net_id $2 $3)
vip=$(echo "select ip_addr from ip_addrs where network = $orig_net_id and
node is null and vip_type = 'haproxy';" | $PG_CMD)
echo "update ip_addrs set ip_addr = '$vip' where network = $seed_net_id and
node is null and vip_type = 'haproxy';" | $PG_CMD
}
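Concretely, update_vip_nailgun_db 1 2 br-ex reduces to the two queries below, assuming the looked-up public network IDs are 10 (5.1 side) and 20 (6.0 side) and the 5.1 haproxy VIP is 172.16.0.2; all values are illustrative:

    echo "select ip_addr from ip_addrs where network = 10 and
          node is null and vip_type = 'haproxy';" | $PG_CMD
    echo "update ip_addrs set ip_addr = '172.16.0.2' where network = 20 and
          node is null and vip_type = 'haproxy';" | $PG_CMD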
update_ips_nailgun_db() {
local orig_net_id
local seed_net_id
local tmpfile
local node
[ -z "$1" ] && die "No 5.1 and 6.0 env IDs provided, exiting"
[ -z "$2" ] && die "No 6.0 env ID provided, exiting"
[ -z "$3" ] && die "No bridge provided, exiting"
orig_net_id=$(get_nailgun_net_id $1 $3)
seed_net_id=$(get_nailgun_net_id $2 $3)
tmpfile="/tmp/env-$1-cics-$3-ips"
list_nodes $1 controller | sed -re "s,node-(.*),\1," | sort > $tmpfile
for node in $(list_nodes $2 controller | sed -re "s,node-(.*),\1," | sort)
do
orig_node=$(sed -i -e '1 w /dev/stdout' -e '1d' "$tmpfile")
echo "DROP TABLE IF EXISTS ip_$$;
SELECT ip_addr INTO ip_$$ FROM ip_addrs WHERE node = $orig_node AND network = $orig_net_id;
DELETE FROM ip_addrs WHERE node = $node AND network = $seed_net_id;
INSERT INTO ip_addrs VALUES(DEFAULT, $seed_net_id, $node,
(SELECT ip_addr FROM ip_$$), DEFAULT);
" | $PG_CMD
done
}
copy_generated_settings() {
# Update configuration of the 6.0 environment in the Nailgun DB to preserve generated
# parameter values from the original environment.
local generated
[ -z "$1" ] && die "No 5.1 env ID provided, exiting"
[ -z "$2" ] && die "No 6.0 env ID provided, exiting"
generated=$(echo "select generated from attributes where cluster_id = $2;
select generated from attributes where cluster_id = $1;" \
| $PG_CMD \
| grep -v ^$ \
| python ${HELPER_PATH}/helpers/join-jsons.py);
[ -z "$generated" ] && die "No generated attributes found for env $1"
echo "update attributes set generated = '$generated' where cluster_id = $2" \
| $PG_CMD
}

View File

@@ -16,21 +16,6 @@ revert_deployment_tasks() {
cp -pR "${FUEL_CACHE}/cluster_$1.orig" "${FUEL_CACHE}/cluster_$1"
}
restore_default_gateway() {
[ -z "$1" ] && die "No node ID provided, exiting"
local env_id=$(get_env_by_node $1)
local nodefile=$(ls ${FUEL_CACHE}/deployment_${env_id}.orig/*_$1.yaml | head -1)
local gw_ip=$(python -c "import yaml;
with open('"${nodefile}"') as f:
    config = yaml.safe_load(f)
    ints = config['network_scheme']['endpoints']
    print ints['br-ex']['gateway']")
[ -z "$gw_ip" ] && return
[[ "$gw_ip" =~ none ]] && return
ssh root@node-$1 "ip route delete default;
ip route add default via $gw_ip"
}
revert_patch_fuel_components() {
local cmp
[ -z "$1" ] && die "No component name provided, exiting"

View File

@@ -35,45 +35,11 @@ get_host_ip_by_node_id() {
echo $(fuel node | awk -F"|" '/^'$1' /{print($5)}' | tr -d ' ')
}
get_last_node() {
echo $(fuel node | awk -F\| '$1 ~ /[0-9]+[ ]+/{print($1)}' \
| sort -n | tail -1)
}
get_node_online() {
[ -z "$1" ] && die "No node ID provided, exiting"
fuel node --node "$1" | tail -1 | awk -F\| '{gsub(" ", "", $9);print($9)}'
}
wait_for_node() {
[ -z "$1" ] && die "No node ID provided, exiting"
[ -z "$2" ] && die "No expected status provided, exiting"
local counter=0
while :
do
[ $counter -gt 120 ] && die "Wait for node-$1 $2 timed out, exiting"
local status=$(fuel node --node $1 \
| awk -F\| '/^'$1'/ {gsub(" ", "", $2);print $2}')
local online=$(get_node_online $1)
[ "$status" == "$2" ] && [ "$online" == "True" ] && break
# Die on an unexpected transition into the 'error' state. An expected 'error'
# status is already handled by the previous statement.
[ "$status" == "error" ] &&
die "Node $1 failed transition to $2 state, exiting"
counter=$(expr $counter + 1)
sleep 60
done
}
check_env_nodes() {
local node
[ -z "$1" ] && die "No env ID provided, exiting"
for node in $(list_nodes $1 "(controller|compute|ceph-osd)")
do
ping -c1 $node || die "Node $node inaccessible, exiting"
done
}
list_nodes() {
local roles_re
[ -z "$1" ] && die "No env ID provided, exiting"