Merge stable/6.1 into master

Change-Id: Idd40ccd0ec510fe13521a6276774f6d9252349d7
This commit is contained in:
Yuriy Taraday 2015-08-18 14:37:50 +03:00
commit 8c2decbee3
9 changed files with 140 additions and 27 deletions

View File

@ -146,7 +146,7 @@ Repeat this process until all nodes are reassigned from 5.1.1 to 6.1 environment
## Finish upgrade
### Clean up 6.1 environment
Run Octane script with 'cleanup' command to delete pending services data from
state database.
@ -154,3 +154,12 @@ state database.
```
[root@fuel bin]# ./octane cleanup <SEED_ID>
```
### Clean up the Fuel Master node
Run 'cleanup-fuel' command to revert all changes made to components of the Fuel
installer and uninstall temporary packages.
```
[root@fuel bin]# ./octane cleanup-fuel
```

View File

@ -10,6 +10,7 @@ export BINPATH="$CWD/bin"
export LIBPATH="$CWD/lib"
export HELPER_PATH="$CWD/helpers"
export PATCH_DIR="$CWD/patches"
export SERVICE_TENANT_ID=""
. ${LIBPATH}/utils.sh
. ${LIBPATH}/nailgundb.sh
@ -18,3 +19,4 @@ export PATCH_DIR="$CWD/patches"
. ${LIBPATH}/maintenance.sh
. ${LIBPATH}/patch.sh
. ${LIBPATH}/ceph.sh
. ${LIBPATH}/revert.sh

View File

@ -20,12 +20,19 @@ nova service-list | grep -q 'nova-compute.*enabled' || {
exit 3
}
nova list --host $1 | grep ' ACTIVE ' | cut -d\| -f3 | sed -r 's/(^[ ]+?|[ ]+?$)//g' | xargs -tI% nova live-migration %
while :; do
VMS=$(nova list --host $1 | wc -l)
if [ $VMS -eq 4 ]; then
break
VMS=$(nova list --host $1 | grep -i ' active ' | wc -l)
if [ $VMS -ne 0 ]; then
for VM in $(nova list --host $1 | grep ' ACTIVE ' \
| cut -d\| -f3 | sed -r 's/(^[ ]+?|[ ]+?$)//g'); do
nova live-migration $VM
done
else
VMS=$(nova list --host $1 | grep -i ' migrating ' | wc -l)
if [ $VMS -ne 0 ]; then
sleep 30
else
echo "All VMs migrated" && exit 0
fi
fi
sleep 30
done

View File

@ -21,6 +21,7 @@ upgrade-node [--isolated] - move a node NODE_ID to env ENV_ID and upgrade
to Management and Public networks
cleanup ENV_ID - delete data about original environment from
upgraded OpenStack cloud
cleanup-fuel - revert changes on the Fuel Master
help - display this message and exit"
}
@ -64,6 +65,9 @@ case $1 in
cleanup_nova_services $2
cleanup_neutron_services $2
;;
cleanup-fuel)
cleanup_fuel
;;
help)
usage
;;

View File

@ -125,15 +125,25 @@ def lnx_add_port(actions, bridge):
def ovs_add_patch_ports(actions, bridge):
for action in actions:
if (action.get("action") == "add-patch" and
bridge in action.get("bridges")):
bridges = action.get("bridges")
bridge in action.get("bridges", [])):
bridges = action.get("bridges", [])
tags = action.get("tags", ["", ""])
trunks = action.get("trunks", [])
for tag in tags:
if tag:
tag = "tag={0}".format(str(tag))
trunk_str = ",".join(trunks)
if trunk_str:
trunk_param = "trunks=[{0}]".format(trunk_str)
if bridges:
return ["ovs-vsctl add-port {0} {0}--{1} "
return ["ovs-vsctl add-port {0} {0}--{1} {3} {4}"
"-- set interface {0}--{1} type=patch "
"options:peer={1}--{0}".format(bridges[0], bridges[1]),
"ovs-vsctl add-port {1} {1}--{0} "
"options:peer={1}--{0}"
.format(bridges[0], bridges[1], tags[0], trunk_param),
"ovs-vsctl add-port {1} {1}--{0} {3} {4}"
"-- set interface {1}--{0} type=patch "
"options:peer={0}--{1}".format(bridges[0], bridges[1])]
"options:peer={0}--{1}"
.format(bridges[0], bridges[1], tags[1], trunk_param)]
def main():

View File

@ -148,7 +148,7 @@ prepare_ceph_osd_upgrade() {
ssh root@$node sh -c "'
f=\$(mktemp)
awk -f /dev/stdin /etc/ceph/ceph.conf > \$f
mv \$f /etc/ceph/ceph.conf && chmod 644 /etc/ceph/ceph.conf
chmod 644 \$f && mv \$f /etc/ceph/ceph.conf
'" <<EOF
BEGIN {
flag = 0

View File

@ -13,6 +13,23 @@ pycmd() {
exit $?
}
get_service_tenant_id() {
    # Determine the Keystone "services" tenant ID for the environment that
    # contains node $1 and store it in the global SERVICE_TENANT_ID.
    # The value is cached in a file under $FUEL_CACHE so later calls for the
    # same environment avoid the SSH round-trip to the controller.
    # Fixes: quote all expansions; do not rewrite the cache file on a cache
    # hit; split declaration from command substitution so failures are not
    # masked by 'local'.
    # Globals:   FUEL_CACHE (read), SERVICE_TENANT_ID (written)
    # Arguments: $1 - node ID
    [ -z "$1" ] && die "No node ID provided, exiting"
    local env filename
    env=$(get_env_by_node "$1")
    filename="${FUEL_CACHE}/env-${env}-service-tenant-id"
    if [ -f "$filename" ]; then
        # Cache hit: reuse the stored ID; no need to rewrite the file.
        SERVICE_TENANT_ID=$(cat "$filename")
        [ -z "$SERVICE_TENANT_ID" ] &&
            die "Cannot determine service tenant ID for node $1, exiting"
        return 0
    fi
    SERVICE_TENANT_ID=$(ssh root@$(get_host_ip_by_node_id "$1") ". openrc;
keystone tenant-get services \
| awk -F\| '\$2 ~ /id/{print \$3}' | tr -d \ ")
    [ -z "$SERVICE_TENANT_ID" ] &&
        die "Cannot determine service tenant ID for node $1, exiting"
    # Persist only verified, freshly fetched values (original also never
    # cached an empty result, since die fired before the write).
    echo "$SERVICE_TENANT_ID" > "$filename"
}
get_deployment_info() {
local cmd
# Download deployment config from Fuel master for environment ENV to subdir in
@ -38,6 +55,13 @@ upload_deployment_info() {
fuel deployment --env $1 --upload --dir $FUEL_CACHE
}
backup_deployment_tasks() {
    # Snapshot the cached deployment tasks of environment $1 into a
    # "cluster_<id>.orig" copy so they can be restored later.
    # Globals:   FUEL_CACHE (read)
    # Arguments: $1 - environment ID
    # Returns:   0 on success, non-zero when the cache dirs are missing.
    local env_id="$1"
    [ -z "$env_id" ] && die "No environment ID provided, exiting"
    if [ -d "$FUEL_CACHE" ] && [ -d "${FUEL_CACHE}/cluster_${env_id}" ]; then
        cp -pR "${FUEL_CACHE}/cluster_${env_id}" "${FUEL_CACHE}/cluster_${env_id}.orig"
    else
        return 1
    fi
}
upload_deployment_tasks() {
[ -z "$1" ] && die "No environment ID provided, exiting"
[ -d "$FUEL_CACHE" ] &&
@ -385,6 +409,8 @@ cleanup_compute_upgrade() {
prepare_controller_upgrade() {
    # Pre-upgrade hook for a controller node: cache the Keystone service
    # tenant ID so it can later be written into Neutron's config on 6.1.
    # Arguments: $1 - 6.0 environment ID, $2 - node ID
    if [ -z "$1" ]; then
        die "No 6.0 env and node ID provided, exiting"
    fi
    if [ -z "$2" ]; then
        die "No node ID provided, exiting"
    fi
    # Required for updating tenant ID in Neutron config on 6.1
    get_service_tenant_id $2
}
upgrade_node_preprovision() {
@ -471,9 +497,13 @@ upgrade_node_postdeploy() {
unset_osd_noout $1
;;
controller)
neutron_update_admin_tenant_id $1
;;
esac
done
if [ "$3" == "isolated" ]; then
restore_default_gateway $2
fi
}
upgrade_node() {
@ -497,7 +527,7 @@ upgrade_node() {
done
env_action $env deploy "$@"
for n in $@; do
upgrade_node_postdeploy $env $n
upgrade_node_postdeploy $env $n $isolated
done
}
@ -520,7 +550,6 @@ upgrade_cics() {
do
create_patch_ports $2 $br_name
done
neutron_update_admin_tenant_id $2
list_nodes $1 compute | xargs -I{} ${BINPATH}/upgrade-nova-compute.sh {}
}
@ -538,14 +567,8 @@ neutron_update_admin_tenant_id() {
local tenant_id=''
[ -z "$1" ] && die "No env ID provided, exiting"
cic_node=$(list_nodes $1 controller | head -1)
while [ -z "$tenant_id" ]; do
tenant_id=$(ssh root@$cic_node ". openrc;
keystone tenant-get services \
| awk -F\| '$2 ~ /id/{print $3}' | tr -d \ ")
sleep 3
done
list_nodes $1 controller | xargs -I{} ssh root@{} \
"sed -re 's/^(nova_admin_tenant_id )=.*/\1 = $tenant_id/' \
"sed -re 's/^(nova_admin_tenant_id )=.*/\1 = $SERVICE_TENANT_ID/' \
-i /etc/neutron/neutron.conf;
restart neutron-server"
}
@ -577,3 +600,7 @@ delete_fuel_resources() {
scp $HELPER_PATH/delete_fuel_resources.py root@$host:/tmp
ssh root@$host ". openrc; python /tmp/delete_fuel_resources.py"
}
cleanup_fuel() {
# Entry point for the 'cleanup-fuel' CLI command: revert all changes made
# to the Fuel Master components and uninstall temporary patches by
# delegating to revert_prepare_fuel (defined in lib/revert.sh).
revert_prepare_fuel
}

48
octane/lib/revert.sh Normal file
View File

@ -0,0 +1,48 @@
# vim: syntax=sh
# Helpers for reverting the changes Octane makes to the Fuel Master node.
# Absolute path of this file, with symlinks resolved.
REVERT_PATH="$(readlink -e "$BASH_SOURCE")"
# Directory one level above this script's directory (the octane root).
# NOTE(review): OCTANE_PATH is not referenced in this file — presumably
# consumed by scripts that source it; verify before removing.
OCTANE_PATH="$(readlink -e "$(dirname "$REVERT_PATH")/..")"
## functions
revert_prepare_fuel () {
# Undo the Fuel Master preparation steps: revert the patches applied to the
# puppet component, then re-apply all container patches in reverse.
revert_patch_fuel_components puppet
revert_all_patches
}
revert_deployment_tasks() {
    # Restore the deployment tasks of environment $1 from the
    # "cluster_<id>.orig" snapshot created by backup_deployment_tasks.
    # Bug fix: 'cp -pR src.orig dst' with an existing destination directory
    # copies the snapshot *into* it (creating cluster_$1/cluster_$1.orig)
    # instead of restoring it — remove the stale directory first. Also guard
    # on the snapshot actually existing before touching anything.
    # Globals:   FUEL_CACHE (read)
    # Arguments: $1 - environment ID
    [ -z "$1" ] && die "No environment ID provided, exiting"
    if [ -d "$FUEL_CACHE" ] && [ -d "${FUEL_CACHE}/cluster_$1.orig" ]; then
        rm -rf "${FUEL_CACHE}/cluster_$1"
        cp -pR "${FUEL_CACHE}/cluster_$1.orig" "${FUEL_CACHE}/cluster_$1"
    fi
}
restore_default_gateway() {
# Re-install the default route on node $1 using the br-ex gateway recorded
# in the node's original (pre-upgrade) deployment config dump.
# Globals:   FUEL_CACHE (read)
# Arguments: $1 - node ID
[ -z "$1" ] && die "No node ID provided, exiting"
local env_id=$(get_env_by_node $1)
# First YAML file for this node in the saved ".orig" deployment dump.
local nodefile=$(ls ${FUEL_CACHE}/deployment_${env_id}.orig/*_$1.yaml | head -1)
# Extract network_scheme.endpoints.br-ex.gateway via inline Python 2.
# NOTE(review): the indentation of the inline python body appears stripped
# by the diff renderer — a top-level 'with' requires an indented body;
# verify against the original file.
local gw_ip=$(python -c "import yaml;
with open('"${nodefile}"') as f:
config = yaml.safe_load(f)
ints = config['network_scheme']['endpoints']
print ints['br-ex']['gateway']")
# No recorded gateway (empty or literal "none") means nothing to restore.
[ -z "$gw_ip" ] && return
[[ "$gw_ip" =~ none ]] && return
ssh root@node-$1 "ip route delete default;
ip route add default via $gw_ip"
}
revert_patch_fuel_components() {
    # Run the per-component revert.sh script for every component named in
    # the argument list.
    # Globals:   PATCH_DIR (read)
    # Arguments: $@ - component names (sub-directories of $PATCH_DIR)
    local component
    [ -z "$1" ] && die "No component name provided, exiting"
    for component in "$@"; do
        if [ ! -d "$PATCH_DIR/$component" ]; then
            die "No dir for component $component, exiting"
        fi
        pushd "$PATCH_DIR/$component"
        # Components without an executable revert.sh are skipped silently.
        if [ -x "./revert.sh" ]; then
            ./revert.sh
        fi
        popd
    done
}
# Re-run the container patching with "-R" extra args — presumably consumed
# by patch_all_containers to apply every patch in reverse (undo) mode;
# verify against its definition in lib/patch.sh.
function revert_all_patches() {
PATCH_EXTRA_ARGS="-R" patch_all_containers
}

View File

@ -1,3 +1,4 @@
import random
import time
import neutronclient.neutron.client
@ -123,12 +124,13 @@ class TestResourcesGenerator(object):
floatingip_list = self.neutron.list_floatingips()['floatingips']
for net in xrange(networks_count):
router = self._create_router("testrouter{0}".format(net))
network = self._create_network("testnet{0}".format(net))
name = random.randint(0x000000, 0xffffff)
router = self._create_router("testrouter{0}".format(name))
network = self._create_network("testnet{0}".format(name))
subnet = self._create_subnet(network, "12.0.{0}.0/24".format(net))
self._uplink_subnet_to_router(router, subnet)
for vm in xrange(vms_per_net):
server = self._create_server("testserver{0}{1}".format(net,
server = self._create_server("testserver{0}-{1}".format(name,
vm),
image_id,
flavor.id,
@ -155,9 +157,13 @@ if __name__ == '__main__':
help='admin tenant')
parser.add_argument('keystone_url', metavar='<keystone_url>', type=str,
help='Keystone url')
parser.add_argument('--num-routers', type=int, default=3,
help='Number of routers')
parser.add_argument('--num-servers', type=int, default=5,
help='Number of servers')
args = parser.parse_args()
generator = TestResourcesGenerator(args.username, args.password,
args.tenant_name,
args.keystone_url)
generator.infra_generator(3, 5)
generator.infra_generator(args.num_routers, args.num_servers)