Browse Source

Merge branch 'stable/7.0'

Change-Id: I5f27d803e43b56e96c4dd29c6357133783a9e671
tags/9.0
Yuriy Taraday 3 years ago
parent
commit
cae1d8af5d

+ 0
- 31
octane/bin/create-controller-ports View File

@@ -1,31 +0,0 @@
1
-#!/usr/bin/python
2
-import sys
3
-import os
4
-sys.path.append(os.path.normpath("{0}/../../helpers/".format(__file__)))
5
-import transformations as t
6
-
7
-
8
-PROVIDERS = {
9
-    "ovs": t.ovs_add_patch_ports,
10
-    "lnx": t.lnx_add_port
11
-}
12
-
13
-def main():
14
-    node_file = sys.argv[1]
15
-    bridge = sys.argv[2]
16
-
17
-    host_config = t.load_yaml_file(node_file)
18
-    actions = host_config["network_scheme"]["transformations"]
19
-    provider = t.get_bridge_provider(actions, bridge)
20
-    if not provider:
21
-        provider = "lnx"
22
-    commands = []
23
-    if provider in PROVIDERS:
24
-        get_commands = PROVIDERS[provider]
25
-        commands = get_commands(actions, bridge)
26
-        for command in commands:
27
-            print command
28
- 
29
-
30
-if __name__ == '__main__':
31
-    main()

+ 0
- 20
octane/bin/env View File

@@ -1,20 +0,0 @@
1
-KEY=0 # Start value for GRE port keys in OVS configuration
2
-NODE_ID=0 # This constant stores ID of a node after it is reassigned to the seed
3
-          # environment
4
-export FUEL_CACHE="/tmp/octane/deployment" # Directory to store deployment information
5
-export PUPPET_PATH="/etc/puppet/2014.2.2-6.1/modules"
6
-
7
-export CWD=$(dirname `readlink -f ${BASH_SOURCE[0]}`)"/../"
8
-
9
-export BINPATH="$CWD/bin"
10
-export LIBPATH="$CWD/lib"
11
-export HELPER_PATH="$CWD/helpers"
12
-export PATCH_DIR="$CWD/patches"
13
-export SERVICE_TENANT_ID=""
14
-
15
-. ${LIBPATH}/utils.sh
16
-. ${LIBPATH}/functions.sh
17
-. ${LIBPATH}/maintenance.sh
18
-. ${LIBPATH}/patch.sh
19
-. ${LIBPATH}/ceph.sh
20
-. ${LIBPATH}/revert.sh

+ 0
- 80
octane/bin/octane View File

@@ -1,80 +0,0 @@
1
-#!/bin/bash
2
-
3
-[ "$1" == "-d" ] && {
4
-    set -x
5
-    shift
6
-}
7
-
8
-usage() {
9
-    echo "Usage: $(basename $0) [-d] COMMAND ENV_ID [ENV_ID]
10
-COMMAND:
11
-prepare                         - prepare the Fuel Master node to upgrade an
12
-                                  environment
13
-upgrade-env ENV_ID              - create upgrade seed env for env ENV_ID and
14
-                                  copy settings from ENV_ID to upgrade seed env
15
-upgrade-cics ORIG_ID SEED_ID    - transfer state from controllers in original
16
-                                  env to upgrade seed env and replace CICs
17
-upgrade-db ORIG_ID SEED_ID      - migrate and upgrade state databases data
18
-upgrade-ceph ORIG_ID SEED_ID    - update Ceph cluster configuration
19
-upgrade-node [--isolated]       - move a node NODE_ID to env ENV_ID and upgrade the node
20
-    ENV_ID NODE_ID [NODE_ID ...]  --isolated flag means that node won't be connected
21
-                                  to Management and Public networks
22
-cleanup ENV_ID                  - delete data about original environment from
23
-                                  upgraded OpenStack cloud
24
-cleanup-fuel                    - revert changes on the Fuel Master
25
-help                            - display this message and exit"
26
-}
27
-
28
-. `dirname $(readlink -f $0)`/env
29
-
30
-[ -z "$1" ] && die "$(usage)"
31
-
32
-case $1 in
33
-    prepare)
34
-        pycmd "$@"
35
-        ;;
36
-    upgrade-env)
37
-        pycmd "$@"
38
-        ;;
39
-    upgrade-cics)
40
-        [ -z "$2" ] && die "$(usage)"
41
-        [ -z "$3" ] && die "$(usage)"
42
-        upgrade_cics $2 $3
43
-        ;;
44
-    upgrade-db)
45
-        pycmd "$@"
46
-        ;;
47
-    upgrade-ceph)
48
-        [ -z "$2" ] && die "$(usage)"
49
-        [ -z "$3" ] && die "$(usage)"
50
-        upgrade_ceph $2 $3
51
-        ;;
52
-    upgrade-node)
53
-        pycmd "$@"
54
-        ;;
55
-    upgrade-nova-compute)
56
-    # TODO(ogelbukh) delete as obsoleted by upgrade-cics command.
57
-        [ -z "$2" ] && die "$(usage)"
58
-        list_nodes $2 compute \
59
-        | xargs -I{} ./upgrade-nova-compute.sh {}
60
-        ;;
61
-    cleanup)
62
-        [ -z "$2" ] && die "$(usage)"
63
-        cleanup_nova_services $2
64
-        cleanup_neutron_services $2
65
-        ;;
66
-    cleanup-fuel)
67
-        cleanup_fuel
68
-        ;;
69
-    help)
70
-        usage
71
-        ;;
72
-     *)
73
-        echo "Invalid command: $1"
74
-        usage
75
-        exit 1
76
-        ;;
77
-esac
78
-
79
-exit 0
80
-# vi:sw=4:ts=4:

+ 0
- 96
octane/bin/upgrade-neutron.sh View File

@@ -1,96 +0,0 @@
1
-#!/bin/bash
2
-
3
-set -ex
4
-
5
-SRC=${4:-/etc/neutron} 
6
-TMPL=${3:-neutron-template} 
7
-TEMPLATE_FILE=../patches/neutron-template.tar
8
-
9
-function log {
10
-	echo $* > /dev/stderr
11
-} 
12
-
13
-function exit_error {
14
-  log "Error"
15
-  exit 1
16
-}
17
-
18
-function exit_success {
19
-  log "Success"
20
-  exit 0
21
-} 
22
-
23
-function tmpl_var_names {
24
-	egrep -Rho '%[A-Z_]+%' $1 | sed -r ':a;N;$!ba;s/\n/\l|/g;s/^/^(/;s/$/)/' | sed 's/\(.*\)/\L\1/;s/%//g'
25
-} 
26
-
27
-function tmpl_var_values {
28
-	sed -r 's/[ ]+?=[ ]+?/=/g' | awk -F= '/=/ {printf("s/%%%s%%/%s/g;\n", toupper($1), $2)}'
29
-} 
30
-
31
-function prepare() {
32
-	local TMPL_DIR=$1
33
-	local SRC_DIR=$2
34
-	local OUTPUT_DIR="/tmp/neutron-$$"
35
-	log "Check source and template dirs"
36
-	test -d $SRC_DIR -a -d $TMPL_DIR
37
-
38
-
39
-	log "Generate variable names"
40
-	var_names=`tmpl_var_names $TMPL_DIR`
41
-
42
-	log "Get values from source dir" 
43
-	var_values=`egrep -hR "$var_names" $SRC_DIR | tmpl_var_values`
44
-
45
-	cp -r $TMPL_DIR $OUTPUT_DIR
46
-
47
-	find $OUTPUT_DIR -type f | xargs -tI{} sed -ri'' "$var_values" {} 
48
-
49
-	echo $OUTPUT_DIR
50
-} 
51
-
52
-function install() {
53
-	local SRC_DIR=$1
54
-	local DST_DIR=$2
55
-	test -d $SRC_DIR -a -d $DST_DIR
56
-	
57
-	test -z "$NEUTRON_BACKUP" && {
58
-		tar cvf /tmp/neutron-before-upgrade$$.tar $DST_DIR
59
-	} 
60
-	rm -rf $DST_DIR 
61
-	cp -vr $SRC_DIR $DST_DIR
62
-	test -f $DST_DIR/plugins/ml2/ml2_conf.ini 
63
-	ln -s $DST_DIR/plugins/ml2/ml2_conf.ini $DST_DIR/plugin.ini
64
-	test -h $DST_DIR/plugin.ini	
65
-	chown -R root:neutron $DST_DIR
66
-} 
67
-
68
-function bootstrap() {
69
-	local NODE=$1
70
-	test -f $0 -a -f ${TEMPLATE_FILE} 
71
-	scp $0 ${TEMPLATE_FILE} ${NODE}:
72
-	ssh ${NODE} "test -d neutron-template || mkdir neutron-template; tar xvf `basename $TEMPLATE_FILE` -C neutron-template"
73
-} 
74
-
75
-trap exit_error EXIT
76
-
77
-case "$1" in
78
-	prepare)
79
-		prepare $2 "/etc/neutron"	
80
-	;;
81
-
82
-	install)
83
-		install $2 "/etc/neutron"
84
-	;;
85
-
86
-	bootstrap) 
87
-		bootstrap $2
88
-	;;
89
-
90
-	*)
91
-		echo "Usage: $0 [prepare|install]"
92
-		exit 1
93
-	
94
-esac
95
-
96
-trap exit_success EXIT

+ 0
- 53
octane/bin/upgrade-nova-compute.sh View File

@@ -1,53 +0,0 @@
1
-#!/bin/sh -e
2
-extract_vars() {
3
-        sed -re '/^\+.*%.*/ s/.*%([^%]+)%.*/\L\1/;tx;d;:x' $1
4
-}
5
-
6
-convert_vars_to_regex() {
7
-        tr "\n" " "| sed -re 's,^,^(,;s,.$,),;s, ,|,g'
8
-}
9
-
10
-generate_template_regex() {
11
-        egrep "`extract_vars $1 | convert_vars_to_regex`" | awk -F= '{key = gensub(" ", "", "g", $1); printf("s|%%%s%%|%s|g;", toupper(key), $2)}'
12
-}
13
-
14
-
15
-upgrade_compute_service() {
16
-	local regex
17
-	local nova_regex
18
-	#regex=$(ssh $1 "find /etc/neutron -type f -exec cat {} \;" | generate_template_regex $PATCH)
19
-	./upgrade-neutron.sh bootstrap $1
20
-	local tmp_dir=`ssh $1 ./upgrade-neutron.sh prepare neutron-template`
21
-	if [ -z "$tmp_dir" ]; then
22
-		echo "Tmp dir err"
23
-		exit 1
24
-	fi
25
-	nova_regex=$(ssh $1 "cat /etc/nova/nova.conf" | generate_template_regex $NOVA_PATCH)
26
-	#sed -r "$regex" ${PATCH}  | ssh $1 "tee /tmp/patch-neutron-config_$1.patch"
27
-	ssh $1 "apt-get update; apt-get install -o Dpkg::Options::='--force-confnew' --yes nova-compute"
28
-	#ssh $1 "cd /etc/neutron && patch -p0 < /tmp/patch-neutron-config_$1.patch"
29
-	cat ${NOVA_PATCH} | sed -r "${nova_regex}" | ssh $1 "cat > /etc/nova/nova.conf"
30
-	ssh $1 ./upgrade-neutron.sh install $tmp_dir
31
-	ssh $1 'restart nova-compute && ( stop neutron-plugin-openvswitch-agent; start neutron-plugin-openvswitch-agent )'
32
-} 
33
-
34
-add_apt_sources() {
35
-	local source
36
-	source="http://$(grep fuel /etc/hosts | cut -d \  -f1):8080/2014.2-6.0/ubuntu/x86_64"
37
-	printf "\ndeb $source precise main\n" | ssh $1 "cat >> /etc/apt/sources.list"
38
-}
39
-
40
-
41
-[ -f "./functions" ] && . ./functions
42
-
43
-[ -z "$1" ] && die "No node ID provided, exiting"
44
-PATCH=${2-"../patches/neutron-upgrade.patch"}
45
-NOVA_PATCH=${3-"../patches/nova.conf"}
46
-
47
-if [ ! -f "$PATCH" -o ! -f "$NOVA_PATCH" ]; then
48
-    echo "Usage $0 NODE_ID [NEUTRON_PATCH_PATH] [NOVA_PATCH_PATH]" >> /dev/stderr
49
-    exit 1
50
-fi
51
-
52
-add_apt_sources $1
53
-upgrade_compute_service $1

+ 4
- 5
octane/commands/install_node.py View File

@@ -109,14 +109,13 @@ def install_node(orig_id, seed_id, node_ids, isolated=False, networks=None):
109 109
     if networks:
110 110
         env_util.clone_ips(orig_id, networks)
111 111
 
112
-    node_util.reboot_nodes(nodes)
112
+    LOG.info("Nodes reboot in progress. Please wait...")
113
+    node_util.reboot_nodes(nodes, timeout=180 * 60)
113 114
     node_util.wait_for_mcollective_start(nodes)
114 115
     env_util.provision_nodes(seed_env, nodes)
115 116
 
116
-    for node in nodes:
117
-        # FIXME: properly call all handlers all over the place
118
-        controller_upgrade.ControllerUpgrade(
119
-            node, seed_env, isolated=isolated).predeploy()
117
+    env_util.update_deployment_info(seed_env, isolated)
118
+
120 119
     if isolated and len(nodes) > 1:
121 120
         isolate(nodes, seed_env)
122 121
 

+ 11
- 3
octane/commands/prepare.py View File

@@ -22,15 +22,23 @@ from octane.util import subprocess
22 22
 
23 23
 
24 24
 def patch_puppet(revert=False):
25
-    direction = "-R" if revert else "-N"
26 25
     puppet_patch_dir = os.path.join(magic_consts.CWD, "patches", "puppet")
27 26
     for d in os.listdir(puppet_patch_dir):
28 27
         d = os.path.join(puppet_patch_dir, d)
29 28
         if not os.path.isdir(d):
30 29
             continue
31 30
         with open(os.path.join(d, "patch")) as patch:
32
-            subprocess.call(["patch", direction, "-p3"], stdin=patch,
33
-                            cwd=magic_consts.PUPPET_DIR)
31
+            try:
32
+                subprocess.call(["patch", "-R", "-p3"], stdin=patch,
33
+                                cwd=magic_consts.PUPPET_DIR)
34
+            except subprocess.CalledProcessError:
35
+                if not revert:
36
+                    pass
37
+                else:
38
+                    raise
39
+            if not revert:
40
+                subprocess.call(["patch", "-N", "-p3"], stdin=patch,
41
+                                cwd=magic_consts.PUPPET_DIR)
34 42
 
35 43
 
36 44
 def apply_patches(revert=False):

+ 49
- 0
octane/commands/rollback_controlplane.py View File

@@ -0,0 +1,49 @@
1
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
2
+# not use this file except in compliance with the License. You may obtain
3
+# a copy of the License at
4
+#
5
+#      http://www.apache.org/licenses/LICENSE-2.0
6
+#
7
+# Unless required by applicable law or agreed to in writing, software
8
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10
+# License for the specific language governing permissions and limitations
11
+# under the License.
12
+from cliff import command as cmd
13
+from fuelclient.objects import environment as environment_obj
14
+
15
+from octane.helpers import network
16
+from octane.util import env as env_util
17
+from octane.util import maintenance
18
+
19
+
20
+def rollback_control_plane(seed_id, orig_id):
21
+    seed_env = environment_obj.Environment(seed_id)
22
+    orig_env = environment_obj.Environment(orig_id)
23
+    # switch physical networks connectivity to orig_env
24
+    roles = ['primary-controller', 'controller']
25
+    for node, info in env_util.iter_deployment_info(seed_env, roles):
26
+        network.delete_patch_ports(node, info)
27
+    for node, info in env_util.iter_deployment_info(orig_env, roles):
28
+        network.create_patch_ports(node, info)
29
+    # enable cluster's services for orig_env
30
+    maintenance.start_cluster(orig_env)
31
+    maintenance.start_corosync_services(orig_env)
32
+    maintenance.enable_apis(orig_env)
33
+
34
+
35
+class RollbackControlPlaneCommand(cmd.Command):
36
+    """Rollback control plane to the orig environment"""
37
+
38
+    def get_parser(self, prog_name):
39
+        parser = super(RollbackControlPlaneCommand, self).get_parser(prog_name)
40
+        parser.add_argument(
41
+            'seed_id', type=int, metavar='SEED_ID',
42
+            help="ID of seed environment")
43
+        parser.add_argument(
44
+            'orig_id', type=int, metavar='ORIG_ID',
45
+            help="ID of original environment")
46
+        return parser
47
+
48
+    def take_action(self, parsed_args):
49
+        rollback_control_plane(parsed_args.seed_id, parsed_args.orig_id)

+ 1
- 2
octane/commands/update_plugin_settings.py View File

@@ -12,7 +12,6 @@
12 12
 
13 13
 import argparse
14 14
 import logging
15
-import pyzabbix
16 15
 import re
17 16
 import requests
18 17
 
@@ -81,9 +80,9 @@ def get_zabbix_client(astute):
81 80
     session.proxies = {
82 81
         'http': 'http://{0}:8888'.format(node_ip)
83 82
     }
83
+    import pyzabbix
84 84
     client = pyzabbix.ZabbixAPI(server=url, session=session)
85 85
     client.login(user=user, password=password)
86
-
87 86
     return client
88 87
 
89 88
 

+ 168
- 0
octane/commands/upgrade_ceph.py View File

@@ -0,0 +1,168 @@
1
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
2
+# not use this file except in compliance with the License. You may obtain
3
+# a copy of the License at
4
+#
5
+#      http://www.apache.org/licenses/LICENSE-2.0
6
+#
7
+# Unless required by applicable law or agreed to in writing, software
8
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10
+# License for the specific language governing permissions and limitations
11
+# under the License.
12
+
13
+import contextlib
14
+import itertools
15
+import os
16
+import re
17
+import subprocess
18
+import tarfile
19
+
20
+from cliff import command as cmd
21
+from fuelclient.objects import environment as environment_obj
22
+
23
+from octane import magic_consts
24
+from octane.util import env as env_util
25
+from octane.util import node as node_util
26
+from octane.util import ssh
27
+
28
+
29
+def short_hostname(hostname):
30
+    return hostname.partition('.')[0]
31
+
32
+
33
+def remove_mask(ip_addr):
34
+    return ip_addr.partition('/')[0]
35
+
36
+
37
+def replace_addresses(conf, hostnames, mgmt_ips):
38
+    mon_initial_members = ' '.join(hostnames)
39
+    mon_host = ' '.join(mgmt_ips)
40
+
41
+    conf = re.sub(r'\n(mon_initial_members\s+=\s+)[-.\w\s]*\n',
42
+                  "\n\g<1>{0}\n".format(mon_initial_members),
43
+                  conf)
44
+    conf = re.sub(r'\n(mon_host\s+=\s+)[-.\w\s]*\n',
45
+                  "\n\g<1>{0}\n".format(mon_host),
46
+                  conf)
47
+    return conf
48
+
49
+
50
+def get_fsid(conf):
51
+    match = re.search(r'\nfsid\s+=\s+([-.\w]+)\s*\n', conf)
52
+    if match is not None:
53
+        return match.group(1)
54
+
55
+
56
+def replace_host(conf, hostname):
57
+    conf = re.sub(r'\n(host\s+=\s+)[-.\w\s]*\n',
58
+                  "\n\g<1>{0}\n".format(hostname),
59
+                  conf)
60
+    return conf
61
+
62
+
63
+def import_bootstrap_osd(node):
64
+    ssh.call(['ceph', 'auth', 'import', '-i',
65
+              '/root/ceph.bootstrap-osd.keyring'], node=node)
66
+    ssh.call(['ceph', 'auth', 'caps', 'client.bootstrap-osd', 'mon',
67
+              "'allow profile bootstrap-osd'"], node=node)
68
+
69
+
70
+def get_ceph_conf_filename(node):
71
+    cmd = [
72
+        'bash', '-c',
73
+        'pgrep ceph-mon | xargs -I{} cat /proc/{}/cmdline',
74
+    ]
75
+    cmdlines = ssh.call_output(cmd, node=node)
76
+    if cmdlines:
77
+        cmdline = cmdlines.split('\n')[0].split('\0')
78
+        for i, value in enumerate(cmdline):
79
+            if value == '-c' and i < len(cmdline):
80
+                return cmdline[i + 1]
81
+    return '/etc/ceph/ceph.conf'
82
+
83
+
84
+def ceph_set_new_mons(seed_env, filename, conf_filename, db_path):
85
+    nodes = list(env_util.get_controllers(seed_env))
86
+    hostnames = map(short_hostname, node_util.get_hostnames(nodes))
87
+    mgmt_ips = map(remove_mask, node_util.get_ips('management', nodes))
88
+
89
+    with contextlib.closing(tarfile.open(filename)) as f:
90
+        conf = f.extractfile(conf_filename).read()
91
+        conf = replace_addresses(conf, hostnames, mgmt_ips)
92
+
93
+    fsid = get_fsid(conf)
94
+    monmaptool_cmd = ['monmaptool', '--fsid', fsid, '--clobber', '--create']
95
+    for node_hostname, node_ip in itertools.izip(hostnames, mgmt_ips):
96
+        monmaptool_cmd += ['--add', node_hostname, node_ip]
97
+
98
+    for node, node_hostname in itertools.izip(nodes, hostnames):
99
+        node_db_path = "/var/lib/ceph/mon/ceph-{0}".format(node_hostname)
100
+        node_conf = replace_host(conf, node_hostname)
101
+        try:
102
+            ssh.call(['stop', 'ceph-mon', "id={0}".format(node_hostname)],
103
+                     node=node)
104
+        except subprocess.CalledProcessError:
105
+            pass
106
+        ssh.call(['rm', '-rf', node_db_path], node=node)
107
+        node_util.untar_files(filename, node)
108
+        sftp = ssh.sftp(node)
109
+        with sftp.open(conf_filename, 'w') as f:
110
+            f.write(node_conf)
111
+        ssh.call(['mv', db_path, node_db_path], node=node)
112
+
113
+        sysvinit = os.path.join(node_db_path, 'sysvinit')
114
+        try:
115
+            sftp.remove(sysvinit)
116
+        except IOError:
117
+            pass
118
+        upstart = os.path.join(node_db_path, 'upstart')
119
+        sftp.open(upstart, 'w').close()
120
+
121
+        with ssh.tempdir(node) as tempdir:
122
+            monmap_filename = os.path.join(tempdir, 'monmap')
123
+            ssh.call(monmaptool_cmd + [monmap_filename], node=node)
124
+            ssh.call(['ceph-mon', '-i', node_hostname, '--inject-monmap',
125
+                      monmap_filename], node=node)
126
+
127
+    for node, node_hostname in itertools.izip(nodes, hostnames):
128
+        ssh.call(['start', 'ceph-mon', "id={0}".format(node_hostname)],
129
+                 node=node)
130
+    import_bootstrap_osd(nodes[0])
131
+
132
+
133
+def extract_mon_conf_files(orig_env, tar_filename):
134
+    controller = env_util.get_one_controller(orig_env)
135
+    conf_filename = get_ceph_conf_filename(controller)
136
+    conf_dir = os.path.dirname(conf_filename)
137
+    hostname = short_hostname(
138
+        node_util.get_hostname_remotely(controller))
139
+    db_path = "/var/lib/ceph/mon/ceph-{0}".format(hostname)
140
+    node_util.tar_files(tar_filename, controller, conf_dir, db_path)
141
+    return conf_filename, db_path
142
+
143
+
144
+def upgrade_ceph(orig_id, seed_id):
145
+    orig_env = environment_obj.Environment(orig_id)
146
+    seed_env = environment_obj.Environment(seed_id)
147
+
148
+    tar_filename = os.path.join(magic_consts.FUEL_CACHE,
149
+                                "env-{0}-ceph.conf.tar.gz".format(orig_id))
150
+    conf_filename, db_path = extract_mon_conf_files(orig_env, tar_filename)
151
+    ceph_set_new_mons(seed_env, tar_filename, conf_filename, db_path)
152
+
153
+
154
+class UpgradeCephCommand(cmd.Command):
155
+    """update Ceph cluster configuration."""
156
+
157
+    def get_parser(self, prog_name):
158
+        parser = super(UpgradeCephCommand, self).get_parser(prog_name)
159
+        parser.add_argument(
160
+            'orig_id', type=int, metavar='ORIG_ID',
161
+            help="ID of original environment")
162
+        parser.add_argument(
163
+            'seed_id', type=int, metavar='SEED_ID',
164
+            help="ID of seed environment")
165
+        return parser
166
+
167
+    def take_action(self, parsed_args):
168
+        upgrade_ceph(parsed_args.orig_id, parsed_args.seed_id)

+ 23
- 34
octane/commands/upgrade_controlplane.py View File

@@ -9,45 +9,15 @@
9 9
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10 10
 # License for the specific language governing permissions and limitations
11 11
 # under the License.
12
-import os
13
-import yaml
14
-
15 12
 from cliff import command as cmd
16 13
 from fuelclient.objects import environment as environment_obj
17 14
 
18 15
 from octane.helpers import network
19
-from octane import magic_consts
20 16
 from octane.util import env as env_util
21 17
 from octane.util import maintenance
22 18
 from octane.util import ssh
23 19
 
24 20
 
25
-def disconnect_networks(env):
26
-    controllers = list(env_util.get_controllers(env))
27
-    for node in controllers:
28
-        deployment_info = env_util.get_astute_yaml(env, node)
29
-        network.delete_patch_ports(node, deployment_info)
30
-
31
-
32
-def connect_to_networks(env):
33
-    deployment_info = []
34
-    controllers = list(env_util.get_controllers(env))
35
-    backup_path = os.path.join(magic_consts.FUEL_CACHE,
36
-                               'deployment_{0}.orig'
37
-                               .format(env.id))
38
-    for filename in os.listdir(backup_path):
39
-        filepath = os.path.join(backup_path, filename)
40
-        with open(filepath) as info_file:
41
-            info = yaml.safe_load(info_file)
42
-            deployment_info.append(info)
43
-    for node in controllers:
44
-        for info in deployment_info:
45
-            if (info['role'] in ('primary-controller', 'controller')
46
-                    and info['uid'] == str(node.id)):
47
-                network.delete_overlay_networks(node, info)
48
-                network.create_patch_ports(node, info)
49
-
50
-
51 21
 def update_neutron_config(orig_env, seed_env):
52 22
     controllers = list(env_util.get_controllers(seed_env))
53 23
     tenant_id = env_util.cache_service_tenant_id(orig_env)
@@ -61,11 +31,30 @@ def update_neutron_config(orig_env, seed_env):
61 31
 def upgrade_control_plane(orig_id, seed_id):
62 32
     orig_env = environment_obj.Environment(orig_id)
63 33
     seed_env = environment_obj.Environment(seed_id)
34
+    controllers = list(env_util.get_controllers(seed_env))
64 35
     update_neutron_config(orig_env, seed_env)
65
-    maintenance.start_corosync_services(seed_env)
66
-    maintenance.start_upstart_services(seed_env)
67
-    disconnect_networks(orig_env)
68
-    connect_to_networks(seed_env)
36
+    # enable all services on seed env
37
+    if len(controllers) > 1:
38
+        maintenance.stop_cluster(seed_env)
39
+    else:
40
+        maintenance.start_corosync_services(seed_env)
41
+        maintenance.start_upstart_services(seed_env)
42
+    # disable cluster services on orig env
43
+    maintenance.stop_cluster(orig_env)
44
+    # switch networks to seed env
45
+    roles = ['primary-controller', 'controller']
46
+    # disable physical connectivity for orig env
47
+    for node, info in env_util.iter_deployment_info(orig_env, roles):
48
+        network.delete_patch_ports(node, info)
49
+    # enable physical connectivity for seed env
50
+    for node, info in env_util.iter_deployment_info(seed_env, roles):
51
+        network.delete_overlay_networks(node, info)
52
+        network.create_patch_ports(node, info)
53
+    # enable all services on seed env
54
+    if len(controllers) > 1:
55
+        maintenance.start_cluster(seed_env)
56
+        maintenance.start_corosync_services(seed_env)
57
+        maintenance.start_upstart_services(seed_env)
69 58
 
70 59
 
71 60
 class UpgradeControlPlaneCommand(cmd.Command):

+ 4
- 1
octane/commands/upgrade_node.py View File

@@ -55,7 +55,10 @@ def upgrade_node(env_id, node_ids, isolated=False, network_template=None):
55 55
     call_handlers('predeploy')
56 56
     if network_template:
57 57
         env_util.set_network_template(env, network_template)
58
-    env_util.deploy_nodes(env, nodes)
58
+    if isolated or len(nodes) == 1:
59
+        env_util.deploy_nodes(env, nodes)
60
+    else:
61
+        env_util.deploy_changes(env, nodes)
59 62
     call_handlers('postdeploy')
60 63
 
61 64
 

+ 1
- 0
octane/handlers/upgrade/__init__.py View File

@@ -16,6 +16,7 @@ from octane import handlers
16 16
 class UpgradeHandler(object):
17 17
     def __init__(self, node, env, isolated):
18 18
         self.node = node
19
+        self.orig_env = self.node.env
19 20
         self.env = env
20 21
         self.isolated = isolated
21 22
 

+ 3
- 0
octane/handlers/upgrade/ceph_osd.py View File

@@ -10,6 +10,7 @@
10 10
 # License for the specific language governing permissions and limitations
11 11
 # under the License.
12 12
 
13
+from octane.commands import prepare
13 14
 from octane.handlers import upgrade
14 15
 from octane.util import ceph
15 16
 from octane.util import node as node_util
@@ -22,9 +23,11 @@ class CephOsdUpgrade(upgrade.UpgradeHandler):
22 23
     def prepare(self):
23 24
         self.preserve_partition()
24 25
         ceph.set_osd_noout(self.env)
26
+        prepare.patch_puppet()
25 27
 
26 28
     def postdeploy(self):
27 29
         ceph.unset_osd_noout(self.env)
30
+        prepare.patch_puppet(revert=True)
28 31
 
29 32
     def preserve_partition(self):
30 33
         partition = 'ceph'

+ 34
- 6
octane/handlers/upgrade/compute.py View File

@@ -10,8 +10,10 @@
10 10
 # License for the specific language governing permissions and limitations
11 11
 # under the License.
12 12
 
13
+import logging
13 14
 import os.path
14 15
 import stat
16
+import subprocess
15 17
 
16 18
 from octane.handlers import upgrade
17 19
 from octane.helpers import disk
@@ -21,6 +23,8 @@ from octane.util import node as node_util
21 23
 from octane.util import plugin
22 24
 from octane.util import ssh
23 25
 
26
+LOG = logging.getLogger(__name__)
27
+
24 28
 
25 29
 class ComputeUpgrade(upgrade.UpgradeHandler):
26 30
     def prepare(self):
@@ -34,12 +38,36 @@ class ComputeUpgrade(upgrade.UpgradeHandler):
34 38
     def postdeploy(self):
35 39
         self.restore_iscsi_initiator_info()
36 40
         controller = env_util.get_one_controller(self.env)
37
-        ssh.call(
38
-            ["sh", "-c", ". /root/openrc; "
39
-             "nova service-enable node-{0} nova-compute".format(
40
-                 self.node.data['id'])],
41
-            node=controller,
42
-        )
41
+        # FIXME: Add more correct handling of case
42
+        # when node may have not full name in services data
43
+        try:
44
+            ssh.call(
45
+                ["sh", "-c", ". /root/openrc; "
46
+                 "nova service-enable {0} nova-compute".format(
47
+                     self.node.data['fqdn'])],
48
+                node=controller,
49
+            )
50
+        except subprocess.CalledProcessError as exc:
51
+            LOG.warn("Cannot start service 'nova-compute' on {0} "
52
+                     "by reason: {1}. Try again".format(
53
+                         self.node.data['fqdn'], exc))
54
+            ssh.call(
55
+                ["sh", "-c", ". /root/openrc; "
56
+                 "nova service-enable {0} nova-compute".format(
57
+                     self.node.data['fqdn'].split('.', 1)[0])],
58
+                node=controller,
59
+            )
60
+
61
+        sftp = ssh.sftp(self.node)
62
+
63
+        if self.orig_env.data["fuel_version"] == "6.1":
64
+            with ssh.update_file(sftp, '/etc/nova/nova.conf') as (old, new):
65
+                for line in old:
66
+                    new.write(line)
67
+                    if line.startswith("[upgrade_levels]"):
68
+                        new.write("compute=juno\n")
69
+
70
+            ssh.call(["service", "nova-compute", "restart"], node=self.node)
43 71
 
44 72
     def evacuate_host(self):
45 73
         controller = env_util.get_one_controller(self.env)

+ 31
- 7
octane/handlers/upgrade/controller.py View File

@@ -37,7 +37,11 @@ class ControllerUpgrade(upgrade.UpgradeHandler):
37 37
             self.env, self.node)
38 38
 
39 39
     def predeploy(self):
40
-        deployment_info = env_util.merge_deployment_info(self.env)
40
+        default_info = self.env.get_default_facts('deployment')
41
+        deployment_info = env_util.get_deployment_info(self.env)
42
+        network_data = self.env.get_network_data()
43
+        gw_admin = transformations.get_network_gw(network_data,
44
+                                                  "fuelweb_admin")
41 45
         if self.isolated:
42 46
             # From backup_deployment_info
43 47
             backup_path = os.path.join(
@@ -47,7 +51,7 @@ class ControllerUpgrade(upgrade.UpgradeHandler):
47 51
             if not os.path.exists(backup_path):
48 52
                 os.makedirs(backup_path)
49 53
             # Roughly taken from Environment.write_facts_to_dir
50
-            for info in deployment_info:
54
+            for info in default_info:
51 55
                 if not info['uid'] == str(self.node.id):
52 56
                     continue
53 57
                 fname = os.path.join(
@@ -56,17 +60,20 @@ class ControllerUpgrade(upgrade.UpgradeHandler):
56 60
                 )
57 61
                 with open(fname, 'w') as f:
58 62
                     yaml.safe_dump(info, f, default_flow_style=False)
59
-        for info in deployment_info:
60
-            if not info['uid'] == str(self.node.id):
63
+        for info in default_info:
64
+            if not (info['role'] == 'primary-controller' or
65
+                    info['uid'] == str(self.node.id)):
61 66
                 continue
62 67
             if self.isolated:
63 68
                 transformations.remove_ports(info)
64
-                endpoints = deployment_info[0]["network_scheme"]["endpoints"]
65
-                self.gateway = endpoints["br-ex"]["gateway"]
66
-                transformations.reset_gw_admin(info)
69
+                if info['uid'] == str(self.node.id):
70
+                    endpoints = info["network_scheme"]["endpoints"]
71
+                    self.gateway = endpoints["br-ex"]["gateway"]
72
+                transformations.reset_gw_admin(info, gw_admin)
67 73
             # From run_ping_checker
68 74
             info['run_ping_checker'] = False
69 75
             transformations.remove_predefined_nets(info)
76
+            deployment_info.append(info)
70 77
         self.env.upload_facts('deployment', deployment_info)
71 78
 
72 79
         tasks = self.env.get_deployment_tasks()
@@ -83,6 +90,23 @@ class ControllerUpgrade(upgrade.UpgradeHandler):
83 90
                         self.service_tenant_id))
84 91
                 else:
85 92
                     new.write(line)
93
+        if self.orig_env.data["fuel_version"] == "6.1":
94
+            with ssh.update_file(sftp, '/etc/nova/nova.conf') as (old, new):
95
+                for line in old:
96
+                    new.write(line)
97
+                    if line.startswith("[upgrade_levels]"):
98
+                        new.write("compute=juno\n")
99
+
100
+            nova_services = ssh.call_output(
101
+                ["bash", "-c",
102
+                 "initctl list | "
103
+                 "awk '/nova/ && /start/ {print $1}' | tr '\n' ' '"],
104
+                node=self.node
105
+            )
106
+
107
+            for nova_service in nova_services.split():
108
+                ssh.call(["service", nova_service, "restart"], node=self.node)
109
+
86 110
         ssh.call(['restart', 'neutron-server'], node=self.node)
87 111
         if self.isolated and self.gateway:
88 112
             # From restore_default_gateway

+ 10
- 7
octane/helpers/network.py View File

@@ -238,12 +238,14 @@ def delete_overlay_networks(node, host_config):
238 238
 
239 239
 def delete_port_ovs(bridge, port):
240 240
     bridges = port['bridges']
241
-    port_name = "%s--%s" % (bridges[0], bridges[1])
242
-    return ['ovs-vsctl', 'del-port', bridges[0], port_name]
241
+    port1_name = "%s--%s" % (bridges[0], bridges[1])
242
+    port2_name = "%s--%s" % (bridges[1], bridges[0])
243
+    return [['ovs-vsctl', 'del-port', bridges[0], port1_name],
244
+            ['ovs-vsctl', 'del-port', bridges[1], port2_name]]
243 245
 
244 246
 
245 247
 def delete_port_lnx(bridge, port):
246
-    return ['brctl', 'delif', bridge, port['name']]
248
+    return [['brctl', 'delif', bridge, port['name']]]
247 249
 
248 250
 
249 251
 delete_port_providers = {
@@ -256,8 +258,9 @@ def delete_patch_ports(node, host_config):
256 258
     for bridge in magic_consts.BRIDGES:
257 259
         port, provider = ts.get_patch_port_action(host_config, bridge)
258 260
         delete_port_cmd = delete_port_providers[provider]
259
-        cmd = delete_port_cmd(bridge, port)
260
-        ssh.call(cmd, node=node)
261
+        cmds = delete_port_cmd(bridge, port)
262
+        for cmd in cmds:
263
+            ssh.call(cmd, node=node)
261 264
 
262 265
 
263 266
 def create_port_ovs(bridge, port):
@@ -273,7 +276,7 @@ def create_port_ovs(bridge, port):
273 276
         return cmd
274 277
 
275 278
     cmds = []
276
-    tags = port.get('vlan_ids', ['', ''])
279
+    tags = port.get('vlan_ids') or port.get('tags', ['', ''])
277 280
     trunks = port.get('trunks', [])
278 281
     bridges = port.get('bridges', [])
279 282
     bridge_index = bridges.index(bridge)
@@ -282,7 +285,7 @@ def create_port_ovs(bridge, port):
282 285
         tag = tags[index]
283 286
         tags[index] = "tag=%s" % (str(tag),) if tag else ''
284 287
     trunk = ''
285
-    trunk_str = ','.join(trunks)
288
+    trunk_str = ','.join(map(str, trunks))
286 289
     if trunk_str:
287 290
         trunk = 'trunks=[%s]' % (trunk_str,)
288 291
     if bridges:

+ 8
- 0
octane/helpers/transformations.py View File

@@ -99,6 +99,14 @@ def remove_predefined_nets(host_config):
99 99
     return host_config
100 100
 
101 101
 
102
+def get_network_gw(data, network_name):
103
+    for net in data['networks']:
104
+        if net['name'] == network_name:
105
+            return net.get('gateway')
106
+    else:
107
+        return None
108
+
109
+
102 110
 def reset_gw_admin(host_config, gateway=None):
103 111
     if gateway:
104 112
         gw = gateway

+ 0
- 167
octane/lib/ceph.sh View File

@@ -1,167 +0,0 @@
1
-#!/bin/bash -xe
2
-
3
-SSH_ARGS="-o LogLevel=quiet"
4
-MON_STATE_PATH=/var/lib/ceph/mon
5
-
6
-extract_ceph_conf() {
7
-	sed -nr 's/.*-c ([^ ]+).*/\1/gp'
8
-}
9
-
10
-ceph_get_conf_dir() {
11
-    [ -z "$1" ] && die "no CIC node ID provided in args, exiting"
12
-    local ceph_args=$(ssh $SSH_ARGS root@$(get_host_ip_by_node_id $1) \
13
-        "pgrep 'ceph-mon' | xargs ps -fp | grep -m1 '^root '")
14
-    test -z "$ceph_args" &&
15
-        die "no ceph-mon process on node $1"
16
-    local config_path=$(echo $ceph_args | extract_ceph_conf)
17
-    config_path=${config_path:-/etc/ceph/ceph.conf}
18
-#    test -z "$config_path" &&
19
-#        die "Could not extract config_path from $ceph_args on node $1"
20
-    # we assume, ceph keyrings must be placed in ceph.conf directory
21
-    export CEPH_CONF_DIR=$(dirname $config_path)
22
-}
23
-
24
-ceph_extract_conf() {
25
-    [ -z "$1" ] && die "No 5.1.1 env ID provided as an arg, exiting"
26
-    check_env_exists $1 ||
27
-        die "Env $1 not found"
28
-    export CEPH_CONF_SRC_NODE=$(list_nodes $1 "controller" | head -1)
29
-    test -z "$CEPH_CONF_SRC_NODE" &&
30
-        die "No controllers found in Env $1"
31
-    local controller1_hostname=$(ssh $SSH_ARGS \
32
-        root@$(get_host_ip_by_node_id ${CEPH_CONF_SRC_NODE#node-}) hostname \
33
-        | cut -d. -f1)
34
-    local controller1_db_path=${MON_STATE_PATH}/ceph-${controller1_hostname}
35
-    ssh $SSH_ARGS $(get_host_ip_by_node_id ${CEPH_CONF_SRC_NODE#node-}) \
36
-        test -d $controller1_db_path  ||
37
-        die "$controller1_db_path not found at $CEPH_CONF_SRC_NODE"
38
-    ceph_get_conf_dir ${CEPH_CONF_SRC_NODE#node-}
39
-    test -z "$CEPH_CONF_DIR" &&
40
-        die "Cannot find Ceph conf dir on $CEPH_CONF_SRC_NODE, exiting"
41
-    ssh $SSH_ARGS root@$(get_host_ip_by_node_id ${CEPH_CONF_SRC_NODE#node-}) \
42
-        "tar cvf - $CEPH_CONF_DIR $controller1_db_path | gzip" \
43
-        | cat - > ${FUEL_CACHE}/env-$1-ceph.conf.tar.gz
44
-}
45
-
46
-ceph_set_new_mons() {
47
-    [ -z "$1" ] && die "No 5.1.1 env ID provided as an arg, exiting"
48
-    [ -z "$2" ] && die "no 6.0 env ID provided as an arg, exiting"
49
-    for env in "$@"; do
50
-        check_env_exists $env ||
51
-            die "Env $env not found"
52
-    done
53
-    local controller1=$(list_nodes $1 "controller" | head -1)
54
-    test -z "$controller1" &&
55
-        die "No controllers found in Env $1"
56
-    local controllers=$(list_nodes $2 "controller")
57
-    test -z "$controllers" &&
58
-        die "No controllers found in Env $1"
59
-    local controllers_hostnames=$(echo -n $controllers | xargs -I{} \
60
-        ssh $SSH_ARGS root@{} hostname | cut -d. -f1)
61
-    local source_controllers=$(ssh $SSH_AGS root@$controller1 \
62
-        cat ${CEPH_CONF_DIR}/ceph.conf \
63
-        | awk -F= '$1 = /mon_host/ {print gensub("^ ", "", "", $2)}')
64
-    local source_controllers_mask=$(echo ${source_controllers} | sed 's/ /|/g')
65
-    # init global vars for Ceph config values
66
-    export MON_INITIAL_MEMBERS=""
67
-    export MON_HOSTS=""
68
-    # collect avialable dst controllers
69
-    for ctrl_host in ${controllers}; do
70
-        ip_match=`ssh $SSH_ARGS $ctrl_host ip addr \
71
-            | grep -m1 -E "${source_controllers_mask}" \
72
-            | sed -r 's/[ ]+?inet ([^\/]+).*/\1/'`
73
-        test -z "$ip_match" && continue
74
-        export MON_INITIAL_MEMBERS="$MON_INITIAL_MEMBERS `ssh $SSH_ARGS $ctrl_host hostname | cut -d. -f1`"
75
-        export MON_HOSTS="$MON_HOSTS $ip_match"
76
-    done
77
-}
78
-
79
-ceph_push_update_conf() {
80
-    [ -z "$1" ] && die "no 6.0 env ID provided as an arg, exiting"
81
-    local dst_base_dir=""
82
-    local ctrl_host_db_path
83
-    local controller1_db_path=${MON_STATE_PATH}/ceph-${CEPH_CONF_SRC_NODE}
84
-    local ceph_conf_dir
85
-    local orig_env=$(get_env_by_node ${CEPH_CONF_SRC_NODE#node-})
86
-    for ctrl_host in ${MON_INITIAL_MEMBERS}; do
87
-        ctrl_host_db_path="${MON_STATE_PATH}/ceph-${ctrl_host}"
88
-        ceph_get_conf_dir ${ctrl_host#node-}
89
-        ssh $SSH_ARGS root@$(get_host_ip_by_node_id ${ctrl_host#node-}) \
90
-            "rm -rf $CEPH_CONF_DIR;
91
-             mkdir $CEPH_CONF_DIR;
92
-             test -d $ctrl_host_db_path && rm -rf $ctrl_host_db_path;
93
-             :"
94
-        cat ${FUEL_CACHE}/env-${orig_env}-ceph.conf.tar.gz \
95
-            | ssh $SSH_ARGS $ctrl_host "gunzip | tar xvf - -C /"
96
-        ssh $SSH_ARGS root@$(get_host_ip_by_node_id ${ctrl_host#node-}) "
97
-        set -ex
98
-        mv $controller1_db_path $ctrl_host_db_path
99
-        rm $ctrl_host_db_path/sysvinit || echo "File sysvinit not found"
100
-        touch $ctrl_host_db_path/upstart
101
-        sed -i'' 's/^mon_initial_members =.*/mon_initial_members =$MON_INITIAL_MEMBERS/g;
102
-              s/^mon_host =.*/mon_host =$MON_HOSTS/g;
103
-              s/^host =.*/host = ${ctrl_host}/g' ${CEPH_CONF_DIR}/ceph.conf 
104
-
105
-        cat ${CEPH_CONF_DIR}/ceph.conf | awk -F= '
106
-            \$1 ~ /^fsid/ {
107
-                fsid = \$2
108
-            } 
109
-            \$1 ~ /^mon_initial_members/ {
110
-                split(\$2, members, \" \")
111
-            }
112
-            \$1 ~ /^mon_host/ {
113
-                split(\$2, host, \" \")
114
-            }
115
-            END {
116
-                printf(\"monmaptool --fsid %s --clobber --create \", fsid)
117
-                for (i in members) {
118
-                    printf(\" --add %s %s\", members[i], host[i]);
119
-                } 
120
-                printf(\" /tmp/monmap\n\")
121
-            }' | sh -
122
-
123
-        ceph-mon -i ${ctrl_host} --inject-monmap /tmp/monmap 
124
-      " 
125
-    done
126
-    for ctrl_host in "${MON_INITIAL_MEMBERS# }"; do
127
-        ssh root@$ctrl_host "restart ceph-mon id=$ctrl_host"
128
-    done
129
-}
130
-
131
-import_bootstrap_osd() {
132
-    local node
133
-    [ -z "$1" ] && die "No env ID provided, exiting"
134
-    node=$(list_nodes $1 controller | head -1)
135
-    ssh root@$(get_host_ip_by_node_id ${node#node-}) \
136
-        "ceph auth import -i /root/ceph.bootstrap-osd.keyring;
137
-        ceph auth caps client.bootstrap-osd mon 'allow profile bootstrap-osd'"
138
-}
139
-
140
-prepare_ceph_osd_upgrade() {
141
-    local seed_id
142
-    local nodes
143
-    local node
144
-    [ -z "${seed_id:=$1}" ] && die "No 6.0 env ID provided, exiting"
145
-    nodes=$(list_nodes $seed_id '(controller)')
146
-    for node in $nodes
147
-        do
148
-            ssh root@$node sh -c "'
149
-                f=\$(mktemp)
150
-                awk -f /dev/stdin /etc/ceph/ceph.conf > \$f
151
-                chmod 644 \$f && mv \$f /etc/ceph/ceph.conf
152
-            '" <<EOF
153
-BEGIN {
154
-    flag = 0
155
-}
156
-/^$|^\[/ && flag == 1 {
157
-    flag = 0;
158
-    print "osd_crush_update_on_start = false"
159
-}
160
-/^\[global\]$/ {
161
-    flag = 1
162
-}
163
-{ print \$0 }
164
-EOF
165
-        done
166
-}
167
-

+ 0
- 133
octane/lib/functions.sh View File

@@ -1,133 +0,0 @@
1
-#!/bin/bash
2
-
3
-pycmd() {
4
-    if ! python -c 'import octane'; then
5
-        yum install -y python-paramiko
6
-        pip install --no-index -e "$CWD/.." ||
7
-        die "Cannot install octane, exiting"
8
-    fi
9
-    local opts=""
10
-    if shopt -qo xtrace; then
11
-        opts="--debug -v"
12
-    fi
13
-    octane $opts "$@"
14
-    exit $?
15
-}
16
-
17
-check_deployment_status() {
18
-# Verify operational status of environment.
19
-    [ -z "$1" ] && die "No env ID provided, exiting"
20
-    local status=$(fuel env --env $1 \
21
-        | awk -F"|" '/^'$1'/{print $2}' \
22
-        | tr -d ' ')
23
-    [ "$status" == 'new' ] || die "Environment is not operational, exiting"
24
-}
25
-
26
-list_ports() {
27
-# On the host identified by first argument, list ports in bridge, identified by
28
-# second argument.
29
-    [ -z "$1" ] && die "No hostname and bridge name provided, exiting"
30
-    [ -z "$2" ] && die "No bridge name provided, exiting"
31
-    echo -n "$(ssh root@$1 ovs-vsctl list-ports $2)"
32
-}
33
-
34
-create_patch_ports() {
35
-# Create patch interface to connect logical interface to Public or Management
36
-# network to the physical interface to that network.
37
-    local node
38
-    [ -d ${FUEL_CACHE}/deployment_$1.orig ] || die "Deployment information not found for env $1, exiting"
39
-    [ -z "$1" ] && die "No env ID provided, exiting"
40
-    local br_name=$2
41
-    local nodes=$(list_nodes $1 'controller')
42
-    for node in $nodes
43
-        do
44
-            local filename=$(ls ${FUEL_CACHE}/deployment_$1.orig/*_${node#node-}.yaml \
45
-                | head -1)
46
-            ${BINPATH}/create-controller-ports $filename $br_name \
47
-                | xargs -I {} ssh root@$node {}
48
-        done
49
-}
50
-
51
-delete_patch_ports() {
52
-    local br_name
53
-    local ph_name
54
-    local node_ids
55
-    local node_id
56
-    local node
57
-    [ -z "$1" ] && die "No env ID and bridge name provided, exiting"
58
-    [ -z "$2" ] && die "No bridge name provided, exiting"
59
-    br_name=$2
60
-    for node in $(list_nodes $1 controller)
61
-        do
62
-            ph_name=$(list_ports $node $br_name \
63
-                | tr -d '"' \
64
-                | sed -nre 's/'$br_name'--(.*)/\1/p')
65
-
66
-            ssh root@${node} ovs-vsctl del-port $br_name ${br_name}--${ph_name}
67
-            ssh root@${node} ovs-vsctl del-port $ph_name ${ph_name}--${br_name}
68
-        done
69
-}
70
-
71
-upgrade_cics() {
72
-    [ -z "$1" ] && die "No 5.1.1 env ID provided, exiting"
73
-    [ -z "$2" ] && die "No 6.0 env ID provided, exiting"
74
-    check_deployment_status $2
75
-    set_pssh_hosts $1 && {
76
-        enable_apis
77
-    } && unset PSSH_RUN
78
-    set_pssh_hosts $2 && {
79
-        start_corosync_services
80
-        start_upstart_services
81
-    } && unset PSSH_RUN
82
-    for br_name in br-ex br-mgmt br-prv;
83
-    do
84
-        delete_patch_ports $1 $br_name
85
-    done
86
-    for br_name in br-ex br-mgmt;
87
-    do
88
-        create_patch_ports $2 $br_name
89
-    done
90
-    list_nodes $1 compute | xargs -I{} ${BINPATH}/upgrade-nova-compute.sh {}
91
-}
92
-
93
-upgrade_ceph() {
94
-    [ -z "$1" ] && die "No 5.1 and 6.0 env IDs provided, exiting"
95
-    [ -z "$2" ] && die "No 6.0 env ID provided, exiting"
96
-    ceph_extract_conf $1
97
-    ceph_set_new_mons "$@"
98
-    ceph_push_update_conf $2
99
-    import_bootstrap_osd $2
100
-    prepare_ceph_osd_upgrade $2
101
-}
102
-
103
-cleanup_nova_services() {
104
-    [ -z "$1" ] && die "No 6.0 env ID provided, exiting"
105
-    local cic=$(list_nodes $1 controller | head -1)
106
-    ssh root@${cic} '. /root/openrc;
107
-    nova service-list | grep nova \
108
-    | grep -Ev "('$(list_nodes $1 "(controller|compute|ceph-osd)" \
109
-    | sed ':a;N;$!ba;s/\n/|/g')')"' | awk -F \| '{print($2)}' | tr -d ' ' \
110
-    | xargs -I{} ssh root@${cic} ". /root/openrc; nova service-delete {}"
111
-}
112
-
113
-cleanup_neutron_services() {
114
-    [ -z "$1" ] && die "No 6.0 env ID provided, exiting"
115
-    local cic=$(list_nodes $1 controller | head -1)
116
-    ssh root@${cic} '. /root/openrc;
117
-    neutron agent-list | grep neutron \
118
-    | grep -Ev "('$(list_nodes $1 "(controller|compute|ceph-osd)" \
119
-    | sed ':a;N;$!ba;s/\n/|/g')')"' | awk -F \| '{print($2)}' | tr -d ' ' \
120
-    | xargs -I{} ssh root@${cic} ". /root/openrc; neutron agent-delete {}"
121
-}
122
-
123
-delete_fuel_resources() {
124
-    [ -z "$1" ] && die "No env ID provided, exiting"
125
-    local node=$(list_nodes $1 controller | head -1)
126
-    local host=$(get_host_ip_by_node_id ${node#node-})
127
-    scp $HELPER_PATH/delete_fuel_resources.py root@$host:/tmp
128
-    ssh root@$host ". openrc; python /tmp/delete_fuel_resources.py"
129
-}
130
-
131
-cleanup_fuel() {
132
-   revert_prepare_fuel
133
-}

+ 0
- 38
octane/lib/maintenance.sh View File

@@ -1,38 +0,0 @@
1
-#!/bin/bash
2
-
3
-export SVC_LIST="/root/services_list"
4
-export SVC_LIST_TMP="${SVC_LIST}.tmp"
5
-
6
-enable_apis() {
7
-    $PSSH_RUN "sed -i '/use_backend maintenance if TRUE/d' \
8
-        \$(grep -L 'mode *tcp' /etc/haproxy/conf.d/*)"
9
-    $PSSH_RUN "pkill haproxy"
10
-}
11
-
12
-start_corosync_services() {
13
-    $PSSH_RUN "pcs resource \
14
-    | awk '/Clone Set:/ {print \$4; getline; print \$1}' \
15
-    | sed 'N;s/\n/ /' | tr -d :[] \
16
-    | grep Stopped | awk '{print \$1}' \
17
-    | xargs -I@ sh -c \"crm resource start @\""
18
-}
19
-
20
-start_upstart_services() {
21
-    local command=$(cat <<EOF
22
-crm_services=\$(pcs resource \
23
-    | awk '/Clone Set:/ {print \$4; getline; print \$1}' \
24
-    | sed 'N;s/\n/ /' \
25
-    | tr -d ':[]' | awk '{print substr(\$1,3)}');
26
-for s in \$(<${SVC_LIST});
27
-do
28
-    for cs in \$crm_services; do
29
-        if [ "\$cs" == "\$s" ]; then
30
-            continue 2;
31
-        fi;
32
-    done;
33
-    start \$s;
34
-done;
35
-EOF
36
-)
37
-    $PSSH_RUN "$command"
38
-}

+ 0
- 16
octane/lib/patch.sh View File

@@ -1,16 +0,0 @@
1
-#!/bin/bash -xe
2
-
3
-run=".state"
4
-[ -d "$run" ] || mkdir -p "$run"
5
-
6
-patch_fuel_components() {
7
-    local cmp
8
-    [ -z "$1" ] && die "No component name provided, exiting"
9
-    for cmp in "$@";
10
-    do
11
-        [ -d "$PATCH_DIR/$cmp" ] || die "No dir for component $cmp, exiting"
12
-        pushd "$PATCH_DIR/$cmp"
13
-        [ -x "./update.sh" ] && ./update.sh
14
-        popd
15
-    done
16
-}

+ 0
- 33
octane/lib/revert.sh View File

@@ -1,33 +0,0 @@
1
-# vim: syntax=sh
2
-REVERT_PATH="$(readlink -e "$BASH_SOURCE")"
3
-OCTANE_PATH="$(readlink -e "$(dirname "$REVERT_PATH")/..")"
4
-
5
-## functions
6
-
7
-revert_prepare_fuel () {
8
-    revert_patch_fuel_components puppet
9
-    revert_all_patches
10
-}
11
-
12
-revert_deployment_tasks() {
13
-    [ -z "$1" ] && die "No environment ID provided, exiting"
14
-    [ -d "$FUEL_CACHE" ] &&
15
-    [ -d "${FUEL_CACHE}/cluster_$1" ] &&
16
-    cp -pR "${FUEL_CACHE}/cluster_$1.orig" "${FUEL_CACHE}/cluster_$1"
17
-}
18
-
19
-revert_patch_fuel_components() {
20
-    local cmp
21
-    [ -z "$1" ] && die "No component name provided, exiting"
22
-    for cmp in "$@";
23
-    do
24
-        [ -d "$PATCH_DIR/$cmp" ] || die "No dir for component $cmp, exiting"
25
-        pushd "$PATCH_DIR/$cmp"
26
-        [ -x "./revert.sh" ] && ./revert.sh
27
-        popd
28
-    done
29
-}
30
-
31
-function revert_all_patches() { 
32
-        PATCH_EXTRA_ARGS="-R" patch_all_containers
33
-} 

+ 0
- 51
octane/lib/utils.sh View File

@@ -1,51 +0,0 @@
1
-#!/bin/bash
2
-
3
-yell() {
4
-    echo "$*" >&2
5
-}
6
-
7
-die() {
8
-    yell "${FUNCNAME[1]}: ${1}"
9
-    exit ${2:-1}
10
-}
11
-
12
-check_env_exists() {
13
-    [ -z "$1" ] && die "No environment ID provided, exiting"
14
-	local env_id=$1
15
-    fuel env --env-id $env_id  | grep -qE "$env_id[ ]+?\|"
16
-}
17
-
18
-set_pssh_hosts() {
19
-    [ -z "$1" ] && die "No environment ID provided, exiting"
20
-    PSSH_RUN="pssh -i"
21
-    for node in $(list_nodes $1 ${2:controller});
22
-    do
23
-        PSSH_RUN+=" -H $node"
24
-    done
25
-}
26
-
27
-get_env_by_node() {
28
-    [ -z "$1" ] && die "No node ID provided, exiting"
29
-    echo "$(fuel node --node $1 \
30
-        | awk -F\| '/^'$1'/ {gsub(" ", "", $4); print $4}')"
31
-}
32
-
33
-get_host_ip_by_node_id() {
34
-    [ -z "$1" ] && die "No node ID provided, exiting"
35
-    echo $(fuel node | awk -F"|" '/^'$1' /{print($5)}' | tr -d ' ')
36
-}
37
-
38
-get_node_online() {
39
-    [ -z "$1" ] && die "No node ID provided, exiting"
40
-    fuel node --node "$1" | tail -1 | awk -F\| '{gsub(" ", "", $9);print($9)}'
41
-}
42
-
43
-list_nodes() {
44
-    local roles_re
45
-    [ -z "$1" ] && die "No env ID provided, exiting"
46
-    roles_re=${2:-controller}
47
-    echo "$(fuel node --env $1 \
48
-        | awk -F\| '($7 ~ /'$roles_re'/ || $8 ~ /'$roles_re'/) && $2 ~ /'$3'/ {
49
-                gsub(" ","",$1); print "node-" $1
50
-            }')"
51
-}

+ 2
- 2
octane/magic_consts.py View File

@@ -17,8 +17,8 @@ PATCHES = [("nailgun", "/usr/lib/python2.6/site-packages/nailgun/extensions"
17 17
             "/cluster_upgrade/", "patches/nailgun-clone-ips.patch")]
18 18
 # TODO: use pkg_resources for patches
19 19
 CWD = os.path.dirname(__file__)  # FIXME
20
-FUEL_CACHE = "/tmp/octane/deployment"  # TODO: we shouldn't need this
21
-PUPPET_DIR = "/etc/puppet/2014.2.2-6.1/modules"
20
+FUEL_CACHE = "/tmp"  # TODO: we shouldn't need this
21
+PUPPET_DIR = "/etc/puppet/2015.1.0-7.0/modules"
22 22
 BOOTSTRAP_INITRAMFS = "/var/www/nailgun/bootstrap/initramfs.img"
23 23
 
24 24
 SSH_KEYS = ['/root/.ssh/id_rsa', '/root/.ssh/bootstrap.rsa']

+ 134
- 0
octane/patches/puppet/ceph/patch View File

@@ -0,0 +1,134 @@
1
+commit 58895ee6973857fa8b4ee811e7dfa5005ae22aa1 (HEAD, puppet-tests/review/oleg_gelbukh/bp/partition-preservation, review/oleg_gelbukh/bp/partition-preservation)
2
+Author: Oleg Gelbukh <ogelbukh@mirantis.com>
3
+Date:   Mon Jul 20 13:29:09 2015 +0000
4
+
5
+    Support Ceph OSD devices with existing data set
6
+    
7
+    Partition preservation feature allows to preserve data on Ceph OSD device, but
8
+    later on Puppet will run 'ceph-deploy prepare' on every Ceph device in a system.
9
+    This call destroys data set on those devices.
10
+    
11
+    To preserve data on Ceph OSD devices through deployment process, we need to
12
+    check if the device has Ceph data and if so, skip running 'ceph-deploy prepare'
13
+    on that device. Only prepared devices must be activated to avoid deployment
14
+    failure.
15
+    
16
+    It was noted that the bug #1474510 causes ceph-osd service to start improperly,
17
+    thus preventing ceph-all init script from activating all the existing OSD
18
+    devices on boot.
19
+    
20
+    Change-Id: I667fa6aab9d6f46c73bfb8ca0e267afede6049fb
21
+    Implements: blueprint partition-preservation
22
+
23
+diff --git a/deployment/puppet/ceph/manifests/osds.pp b/deployment/puppet/ceph/manifests/osds.pp
24
+index 3281415..a872bb8 100644
25
+--- a/deployment/puppet/ceph/manifests/osds.pp
26
++++ b/deployment/puppet/ceph/manifests/osds.pp
27
+@@ -1,4 +1,12 @@
28
+-# prepare and bring online the devices listed in $::ceph::osd_devices
29
++# == Class: ceph::osd
30
++#
31
++# Prepare and bring online the OSD devices
32
++#
33
++# ==== Parameters
34
++#
35
++# [*devices*]
36
++# (optional) Array. This is the list of OSD devices identified by the facter.
37
++#
38
+ class ceph::osds (
39
+   $devices = $::ceph::osd_devices,
40
+ ){
41
+diff --git a/deployment/puppet/ceph/manifests/osds/osd.pp b/deployment/puppet/ceph/manifests/osds/osd.pp
42
+index b8fd18e..153b84d 100644
43
+--- a/deployment/puppet/ceph/manifests/osds/osd.pp
44
++++ b/deployment/puppet/ceph/manifests/osds/osd.pp
45
+@@ -1,3 +1,7 @@
46
++# == Define: ceph::osds::osd
47
++#
48
++# Prepare and activate OSD nodes on the node
49
++#
50
+ define ceph::osds::osd () {
51
+ 
52
+   # ${name} format is DISK[:JOURNAL]
53
+@@ -18,8 +22,8 @@ define ceph::osds::osd () {
54
+     tries     => 2, # This is necessary because of race for mon creating keys
55
+     try_sleep => 1,
56
+     logoutput => true,
57
+-    unless    => "grep -q ${data_device_name} /proc/mounts",
58
+-  } ->
59
++    unless    => "ceph-disk list | fgrep -q -e '${data_device_name} ceph data, active' -e '${data_device_name} ceph data, prepared'",
60
++  } -> Exec["ceph-deploy osd activate ${deploy_device_name}"]
61
+ 
62
+   exec { "ceph-deploy osd activate ${deploy_device_name}":
63
+     command   => "ceph-deploy osd activate ${deploy_device_name}",
64
+@@ -27,7 +31,7 @@ define ceph::osds::osd () {
65
+     tries     => 3,
66
+     logoutput => true,
67
+     timeout   => 0,
68
+-    unless    => "ceph osd dump | grep -q \"osd.$(sed -nEe 's|${data_device_name}\\ .*ceph-([0-9]+).*$|\\1|p' /proc/mounts)\\ up\\ .*\\ in\\ \"",
69
++    onlyif    => "ceph-disk list | fgrep -q -e '${data_device_name} ceph data, prepared'",
70
+   }
71
+ 
72
+ }
73
+diff --git a/deployment/puppet/ceph/spec/classes/osds__spec.rb b/deployment/puppet/ceph/spec/classes/osds__spec.rb
74
+index b4b7c1b..157bcea 100644
75
+--- a/deployment/puppet/ceph/spec/classes/osds__spec.rb
76
++++ b/deployment/puppet/ceph/spec/classes/osds__spec.rb
77
+@@ -19,7 +19,7 @@ describe 'ceph::osds', :type => :class do
78
+   end
79
+ 
80
+   context 'Class ceph::osds with devices and journals' do
81
+-    let (:params) {{ :devices => ['/dev/sdc1:/dev/sdc2', '/dev/sdd1:/dev/sdd2' ] }}
82
++    let (:params) {{ :devices => ['/dev/sdc1:/dev/sdc2', '/dev/sdd1:/dev/sdd2'] }}
83
+ 
84
+     it { should contain_firewall('011 ceph-osd allow') }
85
+     it { should contain_ceph__osds__osd('/dev/sdc1:/dev/sdc2') }
86
+diff --git a/deployment/puppet/ceph/spec/defines/osd__spec.rb b/deployment/puppet/ceph/spec/defines/osd__spec.rb
87
+index b510da3..9c54569 100644
88
+--- a/deployment/puppet/ceph/spec/defines/osd__spec.rb
89
++++ b/deployment/puppet/ceph/spec/defines/osd__spec.rb
90
+@@ -2,7 +2,7 @@ require 'spec_helper'
91
+ 
92
+ describe 'ceph::osds::osd', :type => :define do
93
+   let :facts do
94
+-    { :hostname => 'test.example', }
95
++    { :hostname => 'test.example' }
96
+   end
97
+ 
98
+   context 'Simple test' do
99
+@@ -15,7 +15,7 @@ describe 'ceph::osds::osd', :type => :define do
100
+       'tries'     => 2,
101
+       'try_sleep' => 1,
102
+       'logoutput' => true,
103
+-      'unless'    => "grep -q /dev/svv /proc/mounts",
104
++      'unless'    => "ceph-disk list | fgrep -q -e '/dev/svv ceph data, active' -e '/dev/svv ceph data, prepared'",
105
+       )
106
+     }
107
+     it { should contain_exec("ceph-deploy osd activate test.example:/dev/svv").with(
108
+@@ -24,7 +24,7 @@ describe 'ceph::osds::osd', :type => :define do
109
+       'tries'     => 3,
110
+       'logoutput' => true,
111
+       'timeout'   => 0,
112
+-      'unless'    => "ceph osd dump | grep -q \"osd.$(sed -nEe 's|/dev/svv\\ .*ceph-([0-9]+).*$|\\1|p' /proc/mounts)\\ up\\ .*\\ in\\ \"",
113
++      'onlyif'    => "ceph-disk list | fgrep -q -e '/dev/svv ceph data, prepared'",
114
+     )
115
+     }
116
+   end
117
+@@ -38,7 +38,7 @@ describe 'ceph::osds::osd', :type => :define do
118
+       'tries'     => 2,
119
+       'try_sleep' => 1,
120
+       'logoutput' => true,
121
+-      'unless'    => "grep -q /dev/sdd /proc/mounts",
122
++      'unless'    => "ceph-disk list | fgrep -q -e '/dev/sdd ceph data, active' -e '/dev/sdd ceph data, prepared'",
123
+       )
124
+     }
125
+     it { should contain_exec("ceph-deploy osd activate test.example:/dev/sdd:/dev/journal").with(
126
+@@ -47,7 +47,7 @@ describe 'ceph::osds::osd', :type => :define do
127
+       'tries'     => 3,
128
+       'logoutput' => true,
129
+       'timeout'   => 0,
130
+-      'unless'    => "ceph osd dump | grep -q \"osd.$(sed -nEe 's|/dev/sdd\\ .*ceph-([0-9]+).*$|\\1|p' /proc/mounts)\\ up\\ .*\\ in\\ \"",
131
++      'onlyif'    => "ceph-disk list | fgrep -q -e '/dev/sdd ceph data, prepared'",
132
+       )
133
+     }
134
+   end
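For reference, the guards introduced in the ceph patch above stop inspecting /proc/mounts and instead key off the per-device state reported by `ceph-disk list` ("ceph data, active" vs "ceph data, prepared"). Below is a minimal Python sketch of that same decision logic, assuming `ceph-disk list` prints one such status line per partition; the device path and helper name are illustrative only and are not part of this change.

import subprocess

def osd_device_state(device):
    # Return 'active', 'prepared' or None for a data device, mirroring the
    # fgrep patterns used by the new unless/onlyif guards above.
    out = subprocess.check_output(['ceph-disk', 'list']).decode()
    for line in out.splitlines():
        if device + ' ceph data, active' in line:
            return 'active'
        if device + ' ceph data, prepared' in line:
            return 'prepared'
    return None

state = osd_device_state('/dev/sdc1')
run_prepare = state is None           # prepare only if neither active nor prepared
run_activate = (state == 'prepared')  # activate only if already prepared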

+ 67
- 0
octane/tests/test_env.py View File

@@ -25,6 +25,73 @@ def mock_os_path(mocker):
25 25
     return res
26 26
 
27 27
 
28
+def test_find_node_deployment_info():
29
+    roles = ['controller', 'primary-controller']
30
+    node = mock.Mock()
31
+    node.id = 1
32
+    res = env_util.find_node_deployment_info(node, roles, DEPLOYMENT_INFO)
33
+    assert res == DEPLOYMENT_INFO[0]
34
+
35
+
36
+def test_find_node_deployment_info_none():
37
+    roles = ['controller', 'primary-controller']
38
+    node = mock.Mock()
39
+    node.id = 2
40
+    res = env_util.find_node_deployment_info(node, roles, DEPLOYMENT_INFO)
41
+    assert res is None
42
+
43
+
44
+DEPLOYMENT_INFO = [{
45
+    'uid': '1',
46
+    'role': 'primary-controller',
47
+    'nodes': [{
48
+        'uid': '1',
49
+        'role': 'primary-controller',
50
+        'name': 'test',
51
+    }, {
52
+        'uid': '1',
53
+        'role': 'zabbix',
54
+        'name': 'test',
55
+    }, {
56
+        'uid': '2',
57
+        'role': 'compute',
58
+        'name': 'test2',
59
+    }],
60
+}, {
61
+    'uid': '1',
62
+    'role': 'zabbix',
63
+    'nodes': [{
64
+        'uid': '1',
65
+        'role': 'primary-controller',
66
+        'name': 'test',
67
+    }, {
68
+        'uid': '1',
69
+        'role': 'zabbix',
70
+        'name': 'test',
71
+    }, {
72
+        'uid': '2',
73
+        'role': 'compute',
74
+        'name': 'test2',
75
+    }],
76
+}, {
77
+    'uid': '2',
78
+    'role': 'compute',
79
+    'nodes': [{
80
+        'uid': '1',
81
+        'role': 'primary-controller',
82
+        'name': 'test',
83
+    }, {
84
+        'uid': '1',
85
+        'role': 'zabbix',
86
+        'name': 'test',
87
+    }, {
88
+        'uid': '2',
89
+        'role': 'compute',
90
+        'name': 'test2',
91
+    }],
92
+}]
93
+
94
+
28 95
 def test_parse_tenant_get():
29 96
     res = env_util.parse_tenant_get(TENANT_GET_SAMPLE, 'id')
30 97
     assert res == 'e26c8079d61f46c48f9a6d606631ee5e'

+ 88
- 0
octane/tests/test_maintenance.py View File

@@ -11,6 +11,8 @@
11 11
 # under the License.
12 12
 
13 13
 import mock
14
+import pytest
15
+from xml.etree import ElementTree
14 16
 
15 17
 from octane.util import maintenance
16 18
 from octane.util import subprocess
@@ -21,6 +23,32 @@ def test_get_crm_services():
21 23
     assert sorted(res) == CRM_XML_PARSE_RESULT
22 24
 
23 25
 
26
+@pytest.mark.parametrize("resource_list,status,expected_result", [
27
+    (["master_p_rabbitmq-server", "vip__management_old"], False, False),
28
+    (["master_p_rabbitmq-server", "vip__management_old"], True, False),
29
+    (["master_p_rabbitmq-server", "p_ceilometer-alarm-evaluator"], False,
30
+     True),
31
+    (["clone_p_neutron-metadata-agent", "vip__management_old",
32
+      "group__zabbix-server"], True, True),
33
+    (["test1", "vip__management_old"], True, False),
34
+    (["test1", "test2"], False, True),
35
+])
36
+def test_resources_synced(resource_list, status, expected_result):
37
+    res = maintenance.is_resources_synced(resource_list, CRM_XML_STATUS_SAMPLE,
38
+                                          status)
39
+    assert res is expected_result
40
+
41
+
42
+def test_resources_status():
43
+    data = ElementTree.fromstring(CRM_XML_STATUS_SAMPLE)
44
+    resources = next(el for el in data if el.tag == 'resources')
45
+
46
+    result = []
47
+    for resource in resources:
48
+        result.append(maintenance.is_resource_active(resource))
49
+    assert result == [True, False, False, True, True]
50
+
51
+
24 52
 def test_stop_corosync_services(mocker, mock_ssh_call, mock_ssh_call_output,
25 53
                                 mock_subprocess, node):
26 54
     get_one_controller = mocker.patch('octane.util.env.get_one_controller')
@@ -34,6 +62,9 @@ def test_stop_corosync_services(mocker, mock_ssh_call, mock_ssh_call_output,
34 62
 
35 63
     mocker.patch('time.sleep')
36 64
 
65
+    wait_for_services = \
66
+        mocker.patch.object(maintenance, 'wait_for_corosync_services_sync')
67
+
37 68
     maintenance.stop_corosync_services('env')
38 69
 
39 70
     assert not mock_subprocess.called
@@ -41,6 +72,8 @@ def test_stop_corosync_services(mocker, mock_ssh_call, mock_ssh_call_output,
41 72
     mock_ssh_call_output.assert_called_once_with(['cibadmin', '--query',
42 73
                                                   '--scope', 'resources'],
43 74
                                                  node=node)
75
+    assert wait_for_services.call_args_list == \
76
+        [mock.call('env', ['s1', 's2'], 'stop')]
44 77
     assert mock_ssh_call.call_args_list == [
45 78
         mock.call(['crm', 'resource', 'stop', 's1'], node=node),
46 79
         mock.call(['crm', 'resource', 'stop', 's1'], node=node),
@@ -57,10 +90,16 @@ def test_start_corosync_services(mocker, mock_ssh_call, mock_ssh_call_output,
57 90
     mock_ssh_call.side_effect = \
58 91
         [None, subprocess.CalledProcessError(1, 'cmd'), None]
59 92
 
93
+    wait_for_services = \
94
+        mocker.patch.object(maintenance, 'wait_for_corosync_services_sync')
95
+
60 96
     maintenance.start_corosync_services('env')
61 97
 
62 98
     mock_ssh_call_output.assert_called_once_with(
63 99
         ['cibadmin', '--query', '--scope', 'resources'], node=node)
100
+
101
+    assert wait_for_services.call_args_list == \
102
+        [mock.call('env', ['test_service1', 'test_service2'], 'start')]
64 103
     assert mock_ssh_call.call_args_list == [
65 104
         mock.call(['crm', 'resource', 'start', 'test_service1'], node=node),
66 105
         mock.call(['crm', 'resource', 'start', 'test_service2'], node=node),
@@ -458,12 +497,61 @@ CRM_XML_SAMPLE = """
458 497
 </resources>
459 498
 """[1:]  # noqa
460 499
 CRM_XML_PARSE_RESULT = [
500
+    'clone_p_dns',
501
+    'clone_p_haproxy',
461 502
     'clone_p_heat-engine',
503
+    'clone_p_mysql',
462 504
     'clone_p_neutron-dhcp-agent',
463 505
     'clone_p_neutron-l3-agent',
464 506
     'clone_p_neutron-metadata-agent',
465 507
     'clone_p_neutron-plugin-openvswitch-agent',
508
+    'clone_p_ntp',
509
+    'clone_p_vrouter',
466 510
     'group__zabbix-server',
511
+    'master_p_conntrackd',
512
+    'master_p_rabbitmq-server',
467 513
     'p_ceilometer-agent-central',
468 514
     'p_ceilometer-alarm-evaluator',
515
+    'vip__management',
516
+    'vip__public',
517
+    'vip__vrouter',
518
+    'vip__vrouter_pub'
469 519
 ]
520
+CRM_XML_STATUS_SAMPLE = """
521
+<crm_mon version="1.1.12">
522
+    <resources>
523
+        <resource id="vip__management_old" resource_agent="ocf::mirantis:ns_IPaddr2" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
524
+            <node name="node-2" id="node-2" cached="false"/>
525
+        </resource>
526
+        <resource id="p_ceilometer-alarm-evaluator" resource_agent="ocf::mirantis:ceilometer-alarm-evaluator" role="Started" active="false" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0" />
527
+        <clone id="master_p_rabbitmq-server" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false" >
528
+            <resource id="p_rabbitmq-server" resource_agent="ocf::mirantis:rabbitmq-server" role="Master" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
529
+                <node name="node-3" id="node-3" cached="false"/>
530
+            </resource>
531
+            <resource id="p_rabbitmq-server" resource_agent="ocf::mirantis:rabbitmq-server" role="Slave" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
532
+                <node name="node-2" id="node-2" cached="false"/>
533
+            </resource>
534
+            <resource id="p_rabbitmq-server" resource_agent="ocf::mirantis:rabbitmq-server" role="Stopped" active="false" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0" />
535
+        </clone>
536
+        <clone id="clone_p_neutron-metadata-agent" >
537
+            <resource id="p_neutron-metadata-agent" resource_agent="ocf::mirantis:neutron-agent-metadata" active="true" >
538
+                <node name="node-3" id="node-3" cached="false"/>
539
+            </resource>
540
+            <resource id="p_neutron-metadata-agent" resource_agent="ocf::mirantis:neutron-agent-metadata" active="true" >
541
+                <node name="node-2" id="node-2" cached="false"/>
542
+            </resource>
543
+            <resource id="p_neutron-metadata-agent" resource_agent="ocf::mirantis:neutron-agent-metadata" active="true" >
544
+                <node name="node-5" id="node-5" cached="false"/>
545
+            </resource>
546
+        </clone>
547
+        <group id="group__zabbix-server" number_resources="2" >
548
+             <resource id="vip__zbx_vip_mgmt" resource_agent="ocf::fuel:ns_IPaddr2" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
549
+                 <node name="node-6" id="6" cached="false"/>
550
+             </resource>
551
+             <resource id="p_zabbix-server" resource_agent="ocf::fuel:zabbix-server" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
552
+                 <node name="node-6" id="6" cached="false"/>
553
+             </resource>
554
+        </group>
555
+    </resources>
556
+</crm_mon>
557
+"""[1:]  # noqa

+ 223
- 0
octane/tests/test_network.py View File

@@ -12,6 +12,7 @@
12 12
 import subprocess
13 13
 
14 14
 from mock import call
15
+from mock import Mock
15 16
 from octane.helpers import network
16 17
 
17 18
 
@@ -87,3 +88,225 @@ def test_create_overlay_network(mocker):
87 88
                                     node1.id)
88 89
 
89 90
     assert mock_ssh.call_args_list == expected_args
91
+
92
+
93
+def test_delete_overlay_network(mocker):
94
+    node = Mock()
95
+    deployment_info = {
96
+        'network_scheme': {
97
+            'transformations': [{
98
+                'action': 'add-br',
99
+                'name': 'br-ex',
100
+                'provider': 'ovs',
101
+            }, {
102
+                'action': 'add-br',
103
+                'name': 'br-mgmt',
104
+            }]
105
+        }
106
+    }
107
+
108
+    mock_ssh = mocker.patch('octane.util.ssh.call')
109
+
110
+    mock_ovs_tuns = mocker.patch('octane.helpers.network.list_tunnels_ovs')
111
+    mock_ovs_tuns.return_value = ['br-ex--gre-10.10.10.2']
112
+
113
+    mock_lnx_tun = mocker.patch('octane.helpers.network.list_tunnels_lnx')
114
+    mock_lnx_tun.return_value = ['gre3-3']
115
+
116
+    expected_args = [
117
+        call(['ovs-vsctl', 'del-port', 'br-ex', 'br-ex--gre-10.10.10.2'],
118
+             node=node),
119
+        call(['brctl', 'delif', 'br-mgmt', 'gre3-3'], node=node),
120
+        call(['ip', 'link', 'delete', 'gre3-3'], node=node),
121
+    ]
122
+
123
+    network.delete_overlay_networks(node, deployment_info)
124
+
125
+    assert mock_ssh.call_args_list == expected_args
126
+
127
+
128
+def test_delete_patch_ports(mocker):
129
+    node = Mock()
130
+
131
+    mock_ssh = mocker.patch('octane.util.ssh.call')
132
+
133
+    expected_args = [
134
+        call(['ovs-vsctl', 'del-port', 'br-ovs-bond1', 'br-ovs-bond1--br-ex'],
135
+             node=node),
136
+        call(['ovs-vsctl', 'del-port', 'br-ex', 'br-ex--br-ovs-bond1'],
137
+             node=node),
138
+        call(['ovs-vsctl', 'del-port', 'br-ovs-bond2',
139
+              'br-ovs-bond2--br-mgmt'],
140
+             node=node),
141
+        call(['ovs-vsctl', 'del-port', 'br-mgmt', 'br-mgmt--br-ovs-bond2'],
142
+             node=node),
143
+    ]
144
+
145
+    network.delete_patch_ports(node, DEPLOYMENT_INFO_5_1)
146
+
147
+    assert mock_ssh.call_args_list == expected_args
148
+
149
+
150
+def test_delete_lnx_ports(mocker):
151
+    node = Mock()
152
+
153
+    mock_ssh = mocker.patch('octane.util.ssh.call')
154
+
155
+    expected_args = [
156
+        call(['brctl', 'delif', 'br-ex', 'eth0.130'],
157
+             node=node),
158
+        call(['brctl', 'delif', 'br-mgmt', 'eth1.220'],
159
+             node=node),
160
+    ]
161
+
162
+    network.delete_patch_ports(node, DEPLOYMENT_INFO_7_0)
163
+
164
+    assert mock_ssh.call_args_list == expected_args
165
+
166
+
167
+def test_create_patch_ports_5_1(mocker):
168
+    node = Mock()
169
+
170
+    mock_ssh = mocker.patch('octane.util.ssh.call')
171
+
172
+    expected_args = [
173
+        call(['ovs-vsctl', 'add-port', 'br-ex', 'br-ex--br-ovs-bond1',
174
+              'trunks=[0]', '--', 'set', 'interface', 'br-ex--br-ovs-bond1',
175
+              'type=patch', 'options:peer=br-ovs-bond1--br-ex'],
176
+             node=node),
177
+        call(['ovs-vsctl', 'add-port', 'br-ovs-bond1', 'br-ovs-bond1--br-ex',
178
+              'trunks=[0]', '--', 'set', 'interface', 'br-ovs-bond1--br-ex',
179
+              'type=patch', 'options:peer=br-ex--br-ovs-bond1'],
180
+             node=node),
181
+        call(['ovs-vsctl', 'add-port', 'br-mgmt', 'br-mgmt--br-ovs-bond2',
182
+              '--', 'set', 'interface', 'br-mgmt--br-ovs-bond2', 'type=patch',
183
+              'options:peer=br-ovs-bond2--br-mgmt'],
184
+             node=node),
185
+        call(['ovs-vsctl', 'add-port', 'br-ovs-bond2', 'br-ovs-bond2--br-mgmt',
186
+              'tag=102', '--', 'set', 'interface', 'br-ovs-bond2--br-mgmt',
187
+              'type=patch', 'options:peer=br-mgmt--br-ovs-bond2'],
188
+             node=node)
189
+    ]
190
+
191
+    network.create_patch_ports(node, DEPLOYMENT_INFO_5_1)
192
+
193
+    assert mock_ssh.call_args_list == expected_args
194
+
195
+
196
+def test_create_patch_ports_7_0(mocker):
197
+    node = Mock()
198
+
199
+    mock_ssh = mocker.patch('octane.util.ssh.call')
200
+
201
+    expected_args = [
202
+        call(['ovs-vsctl', 'add-port', 'br-ex', 'br-ex--br-ovs-bond1', '--',
203
+              'set', 'interface', 'br-ex--br-ovs-bond1', 'type=patch',
204
+              'options:peer=br-ovs-bond1--br-ex'],
205
+             node=node),
206
+        call(['ovs-vsctl', 'add-port', 'br-ovs-bond1', 'br-ovs-bond1--br-ex',
207
+              '--', 'set', 'interface', 'br-ovs-bond1--br-ex', 'type=patch',
208
+              'options:peer=br-ex--br-ovs-bond1'],
209
+             node=node),
210
+        call(['ovs-vsctl', 'add-port', 'br-mgmt', 'br-mgmt--br-ovs-bond2',
211
+              '--', 'set', 'interface', 'br-mgmt--br-ovs-bond2', 'type=patch',
212
+              'options:peer=br-ovs-bond2--br-mgmt'],
213
+             node=node),
214
+        call(['ovs-vsctl', 'add-port', 'br-ovs-bond2', 'br-ovs-bond2--br-mgmt',
215
+              'tag=102', '--', 'set', 'interface', 'br-ovs-bond2--br-mgmt',
216
+              'type=patch', 'options:peer=br-mgmt--br-ovs-bond2'],
217
+             node=node)
218
+    ]
219
+
220
+    network.create_patch_ports(node, DEPLOYMENT_INFO_OVS_7_0)
221
+
222
+    assert mock_ssh.call_args_list == expected_args
223
+
224
+
225
+DEPLOYMENT_INFO_5_1 = {
226
+    'openstack_version': '2014.1.3-5.1.1',
227
+    'network_scheme': {
228
+        'transformations': [{
229
+            'action': 'add-br',
230
+            'name': 'br-ex',
231
+        }, {
232
+            'action': 'add-patch',
233
+            'bridges': [
234
+                'br-ovs-bond1',
235
+                'br-ex'
236
+            ],
237
+            'trunks': [
238
+                0
239
+            ]
240
+        }, {
241
+            'action': 'add-patch',
242
+            'bridges': [
243
+                'br-ovs-bond2',
244
+                'br-mgmt'
245
+            ],
246
+            'tags': [
247
+                102,
248
+                0
249
+            ]
250
+        }, {
251
+            'action': 'add-br',
252
+            'name': 'br-mgmt',
253
+        }]
254
+    }
255
+}
256
+
257
+
258
+DEPLOYMENT_INFO_OVS_7_0 = {
259
+    'openstack_version': '2015.1.0-7.0',
260
+    'network_scheme': {
261
+        'transformations': [{
262
+            'action': 'add-br',
263
+            'name': 'br-ex',
264
+            'provider': 'ovs',
265
+        }, {
266
+            'action': 'add-patch',
267
+            'bridges': [
268
+                'br-ovs-bond1',
269
+                'br-ex'
270
+            ],
271
+            'vlan_ids': [
272
+                0,
273
+                0
274
+            ]
275
+        }, {
276
+            'action': 'add-patch',
277
+            'bridges': [
278
+                'br-ovs-bond2',
279
+                'br-mgmt'
280
+            ],
281
+            'vlan_ids': [
282
+                102,
283
+                0
284
+            ]
285
+        }, {
286
+            'action': 'add-br',
287
+            'name': 'br-mgmt',
288
+            'provider': 'ovs'
289
+        }]
290
+    }
291
+}
292
+
293
+DEPLOYMENT_INFO_7_0 = {
294
+    'openstack_version': '2015.1.0-7.0',
295
+    'network_scheme': {
296
+        'transformations': [{
297
+            'action': 'add-br',
298
+            'name': 'br-ex',
299
+        }, {
300
+            'action': 'add-port',
301
+            'name': 'eth0.130',
302
+            'bridge': 'br-ex'
303
+        }, {
304
+            'action': 'add-br',
305
+            'name': 'br-mgmt',
306
+        }, {
307
+            'action': 'add-port',
308
+            'name': 'eth1.220',
309
+            'bridge': 'br-mgmt'
310
+        }]
311
+    }
312
+}

+ 77
- 0
octane/tests/test_transformations.py View File

@@ -0,0 +1,77 @@
1
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
2
+# not use this file except in compliance with the License. You may obtain
3
+# a copy of the License at
4
+#
5
+#      http://www.apache.org/licenses/LICENSE-2.0
6
+#
7
+# Unless required by applicable law or agreed to in writing, software
8
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10
+# License for the specific language governing permissions and limitations
11
+# under the License.
12
+
13
+from octane.helpers import transformations as ts
14
+
15
+
16
+def test_reset_gw_admin(mocker):
17
+    host_config = DEPLOYMENT_INFO
18
+    gateway = '10.10.10.10'
19
+
20
+    res = ts.reset_gw_admin(host_config, gateway)
21
+
22
+    assert res['network_scheme']['endpoints']['br-fw-admin']['gateway'] == \
23
+        gateway
24
+
25
+
26
+def test_get_network_gw(mocker):
27
+    net_name = 'test_net'
28
+    gateway = '10.10.10.10'
29
+    data = {
30
+        'networks': [
31
+            {
32
+                'name': net_name,
33
+                'gateway': gateway
34
+            }
35
+        ]
36
+    }
37
+
38
+    res = ts.get_network_gw(data, net_name)
39
+
40
+    assert res == gateway
41
+
42
+
43
+def test_get_network_gw_no_gw(mocker):
44
+    net_name = 'test_net'
45
+    data = {
46
+        'networks': [{
47
+            'name': net_name,
48
+        }]
49
+    }
50
+
51
+    res = ts.get_network_gw(data, net_name)
52
+
53
+    assert res is None
54
+
55
+
56
+def test_get_network_gw_no_net(mocker):
57
+    net_name = 'test_net'
58
+    data = {
59
+        'networks': [{
60
+            'name': 'another_test_net',
61
+            'gateway': '10.10.10.10'
62
+        }]
63
+    }
64
+
65
+    res = ts.get_network_gw(data, net_name)
66
+
67
+    assert res is None
68
+
69
+
70
+DEPLOYMENT_INFO = {
71
+    'network_scheme': {
72
+        'endpoints': {
73
+            'br-ex': {'gateway': '172.16.0.1', },
74
+            'br-fw-admin': {}
75
+        }
76
+    }
77
+}

+ 19
- 0
octane/tests/test_upgrade_ceph.py View File

@@ -0,0 +1,19 @@
1
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
2
+# not use this file except in compliance with the License. You may obtain
3
+# a copy of the License at
4
+#
5
+#      http://www.apache.org/licenses/LICENSE-2.0
6
+#
7
+# Unless required by applicable law or agreed to in writing, software
8
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10
+# License for the specific language governing permissions and limitations
11
+# under the License.
12
+
13
+
14
+def test_parser(mocker, octane_app):
15
+    m = mocker.patch('octane.commands.upgrade_ceph.upgrade_ceph')
16
+    octane_app.run(["upgrade-ceph", "1", "2"])
17
+    assert not octane_app.stdout.getvalue()
18
+    assert not octane_app.stderr.getvalue()
19
+    m.assert_called_once_with(1, 2)

+ 98
- 0
octane/tests/test_util_node.py View File

@@ -0,0 +1,98 @@
1
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
2
+# not use this file except in compliance with the License. You may obtain
3
+# a copy of the License at
4
+#
5
+#      http://www.apache.org/licenses/LICENSE-2.0
6
+#
7
+# Unless required by applicable law or agreed to in writing, software
8
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10
+# License for the specific language governing permissions and limitations
11
+# under the License.
12
+
13
+import io
14
+import mock
15
+import pytest
16
+
17
+from octane.util import node as node_util
18
+from octane.util import ssh
19
+
20
+
21
+NODES = [
22
+    {'fqdn': 'node-1',
23
+     'network_data': [{'name': 'management', 'ip': '10.20.0.2'},
24
+                      {'name': 'public', 'ip': '172.167.0.2'}]},
25
+    {'fqdn': 'node-2',
26
+     'network_data': [{'name': 'management', 'ip': '10.20.0.3'},
27
+                      {'name': 'public', 'ip': '172.167.0.3'}]},
28
+    {'fqdn': 'node-3',
29
+     'network_data': [{'name': 'management', 'ip': '10.20.0.4'},
30
+                      {'name': 'public', 'ip': '172.167.0.4'}]},
31
+]
32
+
33
+
34
+@pytest.mark.parametrize('node_data,network_name,expected_ip', [
35
+    (NODES[0], 'management', '10.20.0.2'),
36
+    (NODES[0], 'storage', None),
37
+    ({'network_data': []}, 'management', None),
38
+])
39
+def test_get_ip(node_data, network_name, expected_ip):
40
+    node = create_node(node_data)
41
+    ip = node_util.get_ip(network_name, node)
42
+    assert ip == expected_ip
43
+
44
+
45
+def create_node(data):
46
+    return mock.Mock(data=data, spec_set=['data'])
47
+
48
+
49
+@pytest.fixture
50
+def nodes():
51
+    return map(create_node, NODES)
52
+
53
+
54
+@pytest.mark.parametrize("network_name,expected_ips", [
55
+    ('management', ['10.20.0.2', '10.20.0.3', '10.20.0.4']),
56
+    ('public', ['172.167.0.2', '172.167.0.3', '172.167.0.4']),
57
+])
58
+def test_get_ips(nodes, network_name, expected_ips):
59
+    ips = node_util.get_ips(network_name, nodes)
60
+    assert ips == expected_ips
61
+
62
+
63
+def test_get_hostnames(nodes):
64
+    hostnames = node_util.get_hostnames(nodes)
65
+    assert hostnames == ['node-1', 'node-2', 'node-3']
66
+
67
+
68
+def test_tar_files(node, mock_ssh_popen, mock_open):
69
+    content = b'fake data\nin\nthe\narchive'
70
+
71
+    proc = mock_ssh_popen.return_value.__enter__.return_value
72
+    proc.stdout = io.BytesIO(content)
73
+    buf = io.BytesIO()
74
+    mock_open.return_value.write.side_effect = buf.write
75
+
76
+    node_util.tar_files('filename', node, 'a.file', 'b.file')
77
+
78
+    mock_ssh_popen.assert_called_once_with(
79
+        ['tar', '-czvP', 'a.file', 'b.file'],
80
+        stdout=ssh.PIPE, node=node)
81
+    mock_open.assert_called_once_with('filename', 'wb')
82
+    assert buf.getvalue() == content
83
+
84
+
85
+def test_untar_files(node, mock_ssh_popen, mock_open):
86
+    content = b'fake data\nin\nthe\narchive'
87
+
88
+    proc = mock_ssh_popen.return_value.__enter__.return_value
89
+    buf = io.BytesIO()
90
+    proc.stdin.write = buf.write
91
+    mock_open.return_value = io.BytesIO(content)
92
+
93
+    node_util.untar_files('filename', node)
94
+
95
+    mock_ssh_popen.assert_called_once_with(['tar', '-xzv', '-C', '/'],
96
+                                           stdin=ssh.PIPE, node=node)
97
+    mock_open.assert_called_once_with('filename', 'rb')
98
+    assert buf.getvalue() == content
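The two tests above pin down the expected behaviour of node_util.tar_files and node_util.untar_files without showing their implementation (the octane/util/node.py hunk further below is not shown in full here). The following is a minimal sketch that would satisfy those expectations, assuming ssh.popen is a context manager yielding a process with stdout/stdin pipes; it is an illustration consistent with the tests, not necessarily the code that was merged.

import shutil

from octane.util import ssh


def tar_files(filename, node, *files):
    # Stream a gzipped tar of the given remote files into a local archive.
    cmd = ['tar', '-czvP'] + list(files)
    with ssh.popen(cmd, stdout=ssh.PIPE, node=node) as proc:
        local_file = open(filename, 'wb')
        try:
            shutil.copyfileobj(proc.stdout, local_file)
        finally:
            local_file.close()


def untar_files(filename, node):
    # Stream a local archive into tar running on the remote node; the context
    # manager is assumed to close the pipe and wait for tar to finish.
    with ssh.popen(['tar', '-xzv', '-C', '/'], stdin=ssh.PIPE, node=node) as proc:
        local_file = open(filename, 'rb')
        try:
            shutil.copyfileobj(local_file, proc.stdin)
        finally:
            local_file.close()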

+ 98
- 9
octane/util/env.py View File

@@ -22,6 +22,8 @@ from fuelclient.objects import environment as environment_obj
22 22
 from fuelclient.objects import node as node_obj
23 23
 from fuelclient.objects import task as task_obj
24 24
 
25
+from octane.helpers import tasks as tasks_helpers
26
+from octane.helpers import transformations
25 27
 from octane import magic_consts
26 28
 from octane.util import ssh
27 29
 from octane.util import subprocess
@@ -225,37 +227,38 @@ def move_nodes(env, nodes):
225 227
         node_id = node.data['id']
226 228
         subprocess.call(
227 229
             ["fuel2", "env", "move", "node", str(node_id), str(env_id)])
230
+    LOG.info("Nodes provision started. Please wait...")
228 231
     wait_for_nodes(nodes, "provisioned")
229 232
 
230 233
 
231 234
 def provision_nodes(env, nodes):
232 235
     env.install_selected_nodes('provision', nodes)
233
-    wait_for_nodes(nodes, "provisioned")
236
+    LOG.info("Nodes provision started. Please wait...")
237
+    wait_for_nodes(nodes, "provisioned", timeout=180 * 60)
234 238
 
235 239
 
236 240
 def deploy_nodes(env, nodes):
237 241
     env.install_selected_nodes('deploy', nodes)
238
-    wait_for_nodes(nodes, "ready")
242
+    LOG.info("Nodes deploy started. Please wait...")
243
+    wait_for_nodes(nodes, "ready", timeout=180 * 60)
239 244
     wait_for_tasks(env, "running")
240 245
 
241 246
 
242 247
 def deploy_changes(env, nodes):
243 248
     env.deploy_changes()
249
+    LOG.info("Nodes deploy started. Please wait...")
244 250
     wait_for_env(env, "operational", timeout=180 * 60)
245 251
 
246 252
 
247
-def merge_deployment_info(env):
248
-    default_info = env.get_default_facts('deployment')
253
+def get_deployment_info(env):
254
+    deployment_info = []
249 255
     try:
250 256
         deployment_info = env.get_facts('deployment')
251 257
     except fuelclient.cli.error.ServerDataException:
252 258
         LOG.warn('Deployment info is unchanged for env: %s',
253 259
                  env.id)
254
-        deployment_info = []
255
-    for info in default_info:
256
-        if not (info['uid'], info['role']) in [(i['uid'], i['role'])
257
-           for i in deployment_info]:
258
-            deployment_info.append(info)
260
+    deployment_info = [x for x in deployment_info
261
+                       if x['role'] != 'primary-controller']
259 262
     return deployment_info
260 263
 
261 264
 
@@ -275,3 +278,89 @@ def set_network_template(env, filename):
275 278
     with open(filename, 'r') as f:
276 279
         data = f.read()
277 280
         env.set_network_template_data(yaml.load(data))
281
+
282
+
283
+def update_deployment_info(env, isolated):
284
+    default_info = env.get_default_facts('deployment')
285
+    network_data = env.get_network_data()
286
+    gw_admin = transformations.get_network_gw(network_data,
287
+                                              "fuelweb_admin")
288
+    if isolated:
289
+        # From backup_deployment_info
290
+        backup_path = os.path.join(
291
+            magic_consts.FUEL_CACHE,
292
+            "deployment_{0}.orig".format(env.id),
293
+        )
294
+        if not os.path.exists(backup_path):
295
+            os.makedirs(backup_path)
296
+        # Roughly taken from Environment.write_facts_to_dir
297
+        for info in default_info:
298
+            fname = os.path.join(
299
+                backup_path,
300
+                "{0}_{1}.yaml".format(info['role'], info['uid']),
301
+            )
302
+            with open(fname, 'w') as f:
303
+                yaml.safe_dump(info, f, default_flow_style=False)
304
+    deployment_info = []
305
+    for info in default_info:
306
+        if isolated:
307
+            transformations.remove_ports(info)
308
+            transformations.reset_gw_admin(info, gw_admin)
309
+        # From run_ping_checker
310
+        info['run_ping_checker'] = False
311
+        transformations.remove_predefined_nets(info)
312
+        deployment_info.append(info)
313
+    env.upload_facts('deployment', deployment_info)
314
+
315
+    tasks = env.get_deployment_tasks()
316
+    tasks_helpers.skip_tasks(tasks)
317
+    env.update_deployment_tasks(tasks)
318
+
319
+
320
+def find_node_deployment_info(node, roles, data):
321
+    node_roles = [n['role']
322
+                  for n in data[0]['nodes'] if str(node.id) == n['uid']]
323
+    if not set(roles) & set(node_roles):
324
+        return None
325
+
326
+    for info in data:
327
+        if info['uid'] == str(node.id):
328
+            return info
329
+    return None
330
+
331
+
332
+def get_backup_deployment_info(env_id):
333
+    deployment_info = []
334
+    backup_path = os.path.join(
335
+        magic_consts.FUEL_CACHE, 'deployment_{0}.orig'.format(env_id))
336
+    if not os.path.exists(backup_path):
337
+        return None
338
+
339
+    for filename in os.listdir(backup_path):
340
+        filepath = os.path.join(backup_path, filename)
341
+        with open(filepath) as info_file:
342
+            info = yaml.safe_load(info_file)
343
+            deployment_info.append(info)
344
+
345
+    return deployment_info
346
+
347
+
348
+def collect_deployment_info(env, nodes):
349
+    deployment_info = []
350
+    for node in nodes:
351
+        info = get_astute_yaml(env, node)
352
+        deployment_info.append(info)
353
+    return deployment_info
354
+
355
+
356
+def iter_deployment_info(env, roles):
357
+    controllers = list(get_controllers(env))
358
+    full_info = get_backup_deployment_info(env.id)
359
+    roles = ['primary-controller', 'controller']
360
+
361
+    if not full_info:
362
+        full_info = collect_deployment_info(env, controllers)
363
+
364
+    for node in controllers:
365
+        info = find_node_deployment_info(node, roles, full_info)
366
+        yield (node, info)

+ 129
- 24
octane/util/maintenance.py View File

@@ -51,34 +51,121 @@ def disable_apis(env):
51 51
                 new.write(use_backend_line)
52 52
         ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
53 53
 
54
+
55
+def enable_apis(env):
56
+    controllers = list(env_util.get_controllers(env))
57
+    maintenance_line = 'backend maintenance'
58
+    use_backend_line = '  use_backend maintenance if TRUE'
59
+    for node in controllers:
60
+        sftp = ssh.sftp(node)
61
+        sftp.chdir('/etc/haproxy')
62
+        with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
63
+            for line in old:
64
+                if maintenance_line in line:
65
+                    continue
66
+                new.write(line)
67
+        sftp.chdir('/etc/haproxy/conf.d')
68
+        for f in sftp.listdir():
69
+            with ssh.update_file(sftp, f) as (old, new):
70
+                for line in old:
71
+                    if use_backend_line in line:
72
+                        continue
73
+                    new.write(line)
74
+        ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
75
+
76
+
54 77
 _default_exclude_services = ('p_mysql', 'p_haproxy', 'p_dns', 'p_ntp', 'vip',
55 78
                              'p_conntrackd', 'p_rabbitmq-server',
56 79
                              'clone_p_vrouter')
57 80
 
58 81
 
59
-def get_crm_services(status_out, exclude=_default_exclude_services):
82
+def get_crm_services(status_out):
60 83
     data = ElementTree.fromstring(status_out)
61 84
     for resource in data:
62
-        name = resource.get('id')
63
-        if any(service in name for service in exclude):
64
-            continue
65
-        yield name
85
+        yield resource.get('id')
86
+
87
+
88
+def start_corosync_services(env):
89
+    manage_corosync_services(env, 'start')
66 90
 
67 91
 
68 92
 def stop_corosync_services(env):
93
+    manage_corosync_services(env, 'stop')
94
+
95
+
96
+def manage_corosync_services(env, status):
69 97
     node = env_util.get_one_controller(env)
70 98
     status_out = ssh.call_output(['cibadmin', '--query', '--scope',
71 99
                                   'resources'], node=node)
72
-    for service in get_crm_services(status_out):
100
+    services_list = []
101
+    for res in get_crm_services(status_out):
102
+        if any(service in res for service in _default_exclude_services):
103
+            continue
104
+        services_list.append(res)
105
+
106
+    for service in services_list:
73 107
         while True:
74 108
             try:
75
-                ssh.call(['crm', 'resource', 'stop', service],
109
+                ssh.call(['crm', 'resource', status, service],
76 110
                          node=node)
77 111
             except subprocess.CalledProcessError:
78
-                pass
112
+                # Sometimes pacemaker rejects requests that it is not yet
113
+                # able to process. The sleep mitigates this race.
114
+                time.sleep(1)
79 115
             else:
80 116
                 break
81
-    time.sleep(60)
117
+    wait_for_corosync_services_sync(env, services_list, status)
118
+
119
+
120
+def wait_for_corosync_services_sync(env, resource_list, status,
121
+                                    timeout=1200, check_freq=20):
122
+    status_bool = status == 'start'
123
+    node = env_util.get_one_controller(env)
124
+    started_at = time.time()
125
+    while True:
126
+        crm_out = ssh.call_output(['crm_mon', '--as-xml'], node=node)
127
+        if is_resources_synced(resource_list, crm_out, status_bool):
128
+            return
129
+        if time.time() - started_at >= timeout:
130
+            raise Exception("Timeout waiting for corosync cluster for env %s"
131
+                            " to be synced" % env.id)
132
+        time.sleep(check_freq)
133
+
134
+
135
+def is_resources_synced(resources, crm_out, status):
136
+    def get_resource(resources, resource_id):
137
+        for resource in resources:
138
+            if resource.get('id') == resource_id:
139
+                return resource
140
+        return None
141
+
142
+    data = ElementTree.fromstring(crm_out)
143
+    mon_resources = data.find('resources')
144
+    for resource in resources:
145
+        res = get_resource(mon_resources, resource)
146
+        if not (is_resource_active(res) is status):
147
+            return False
148
+    return True
149
+
150
+
151
+# Resources are fetched from the output of the 'crm_mon' command. This command
152
+# does not report a resource that is not started, so an 'absent' resource can
153
+# be considered disabled.
154
+def is_resource_active(resource):
155
+    if resource is None:
156
+        return False
157
+    if resource.tag == 'resource':
158
+        return is_primitive_active(resource)
159
+    for primitive in resource:
160
+        if not is_primitive_active(primitive):
161
+            return False
162
+    return True
163
+
164
+
165
+def is_primitive_active(resource):
166
+    if resource.get('active') == 'true':
167
+        return True
168
+    return False
82 169
 
83 170
 
84 171
 def stop_upstart_services(env):
@@ -106,21 +193,6 @@ def stop_upstart_services(env):
106 193
             ssh.call(['stop', service], node=node)
107 194
 
108 195
 
109
-def start_corosync_services(env):
110
-    node = next(env_util.get_controllers(env))
111
-    status_out = ssh.call_output(['cibadmin', '--query', '--scope',
112
-                                  'resources'], node=node)
113
-    for service in get_crm_services(status_out):
114
-        while True:
115
-            try:
116
-                ssh.call(['crm', 'resource', 'start', service],
117
-                         node=node)
118
-            except subprocess.CalledProcessError:
119
-                pass
120
-            else:
121
-                break
122
-
123
-
124 196
 def start_upstart_services(env):
125 197
     controllers = list(env_util.get_controllers(env))
126 198
     for node in controllers:
@@ -134,3 +206,36 @@ def start_upstart_services(env):
134 206
                 to_start = svc_file.read().splitlines()
135 207
         for service in to_start:
136 208
             ssh.call(['start', service], node=node)
209
+
210
+
211
+def stop_cluster(env):
212
+    cmds = [['pcs', 'cluster', 'kill']]
213
+    controllers = list(env_util.get_controllers(env))
214
+    for node in controllers:
215
+        for cmd in cmds:
216
+            ssh.call(cmd, node=node)
217
+
218
+
219
+def start_cluster(env):
220
+    major_version = env.data['fuel_version'].split('.')[0]
221
+    cmds = []
222
+    if int(major_version) < 6:
223
+        cmds = [['service', 'corosync', 'start']]
224
+    else:
225
+        cmds = [['pcs', 'cluster', 'start']]
226
+    controllers = list(env_util.get_controllers(env))
227
+    for node in controllers:
228
+        for cmd in cmds:
229
+            ssh.call(cmd, node=node)
230
+    # After starting the cluster we should wait until the resources from the
231
+    # `_default_exclude_services` constant are up and running. Note that the
232
+    # stop/start corosync service helpers do not touch these resources at all.
233
+    node = env_util.get_one_controller(env)
234
+    status_out = ssh.call_output(['cibadmin', '--query', '--scope',
235
+                                  'resources'], node=node)
236
+    services_list = []
237
+    for res in get_crm_services(status_out):
238
+        if any(service in res for service in _default_exclude_services):
239
+            services_list.append(res)
240
+
241
+    wait_for_corosync_services_sync(env, services_list, 'start')

+ 37
- 0
octane/util/node.py View File

@@ -10,7 +10,9 @@
10 10
 # License for the specific language governing permissions and limitations
11 11
 # under the License.
12 12
 
13
+import functools
13 14
 import logging
15
+import shutil
14 16
 import socket
15 17
 import sys
16 18
 import time
@@ -33,6 +35,41 @@ def preserve_partition(node, partition):
33