Merge branch 'stable/7.0'

Change-Id: I5f27d803e43b56e96c4dd29c6357133783a9e671
Yuriy Taraday 2015-11-17 13:17:43 -08:00
commit cae1d8af5d
40 changed files with 1333 additions and 822 deletions

View File

@ -1,31 +0,0 @@
#!/usr/bin/python
import sys
import os
sys.path.append(os.path.normpath("{0}/../../helpers/".format(__file__)))
import transformations as t
PROVIDERS = {
"ovs": t.ovs_add_patch_ports,
"lnx": t.lnx_add_port
}
def main():
node_file = sys.argv[1]
bridge = sys.argv[2]
host_config = t.load_yaml_file(node_file)
actions = host_config["network_scheme"]["transformations"]
provider = t.get_bridge_provider(actions, bridge)
if not provider:
provider = "lnx"
commands = []
if provider in PROVIDERS:
get_commands = PROVIDERS[provider]
commands = get_commands(actions, bridge)
for command in commands:
print command
if __name__ == '__main__':
main()

View File

@ -1,20 +0,0 @@
KEY=0 # Start value for GRE port keys in OVS configuration
NODE_ID=0 # This constant stores ID of a node after it is reassigned to the seed
# environment
export FUEL_CACHE="/tmp/octane/deployment" # Directory to store deployment information
export PUPPET_PATH="/etc/puppet/2014.2.2-6.1/modules"
export CWD=$(dirname `readlink -f ${BASH_SOURCE[0]}`)"/../"
export BINPATH="$CWD/bin"
export LIBPATH="$CWD/lib"
export HELPER_PATH="$CWD/helpers"
export PATCH_DIR="$CWD/patches"
export SERVICE_TENANT_ID=""
. ${LIBPATH}/utils.sh
. ${LIBPATH}/functions.sh
. ${LIBPATH}/maintenance.sh
. ${LIBPATH}/patch.sh
. ${LIBPATH}/ceph.sh
. ${LIBPATH}/revert.sh

View File

@ -1,80 +0,0 @@
#!/bin/bash
[ "$1" == "-d" ] && {
set -x
shift
}
usage() {
echo "Usage: $(basename $0) [-d] COMMAND ENV_ID [ENV_ID]
COMMAND:
prepare - prepare the Fuel Master node to upgrade an
environment
upgrade-env ENV_ID - create upgrade seed env for env ENV_ID and
copy settings from ENV_ID to upgrade seed env
upgrade-cics ORIG_ID SEED_ID - transfer state from controllers in original
env to upgrade seed env and replace CICs
upgrade-db ORIG_ID SEED_ID - migrate and upgrade state databases data
upgrade-ceph ORIG_ID SEED_ID - update Ceph cluster configuration
upgrade-node [--isolated] - move a node NODE_ID to env ENV_ID and upgrade the node
ENV_ID NODE_ID [NODE_ID ...] --isolated flag means that node won't be connected
to Management and Public networks
cleanup ENV_ID - delete data about original environment from
upgraded OpenStack cloud
cleanup-fuel - revert changes on the Fuel Master
help - display this message and exit"
}
. `dirname $(readlink -f $0)`/env
[ -z "$1" ] && die "$(usage)"
case $1 in
prepare)
pycmd "$@"
;;
upgrade-env)
pycmd "$@"
;;
upgrade-cics)
[ -z "$2" ] && die "$(usage)"
[ -z "$3" ] && die "$(usage)"
upgrade_cics $2 $3
;;
upgrade-db)
pycmd "$@"
;;
upgrade-ceph)
[ -z "$2" ] && die "$(usage)"
[ -z "$3" ] && die "$(usage)"
upgrade_ceph $2 $3
;;
upgrade-node)
pycmd "$@"
;;
upgrade-nova-compute)
# TODO(ogelbukh) delete as obsoleted by upgrade-cics command.
[ -z "$2" ] && die "$(usage)"
list_nodes $2 compute \
| xargs -I{} ./upgrade-nova-compute.sh {}
;;
cleanup)
[ -z "$2" ] && die "$(usage)"
cleanup_nova_services $2
cleanup_neutron_services $2
;;
cleanup-fuel)
cleanup_fuel
;;
help)
usage
;;
*)
echo "Invalid command: $1"
usage
exit 1
;;
esac
exit 0
# vi:sw=4:ts=4:
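For reference, a hypothetical invocation of each command listed in the usage() text above. The script name ./octane, the environment IDs (1 for the original environment, 2 for the seed) and the node ID (5) are placeholders, and the commands are shown in the order the help text lists them rather than as a prescribed procedure.
./octane prepare                      # prepare the Fuel Master node for the upgrade
./octane upgrade-env 1                # create an upgrade seed env and copy settings from env 1
./octane upgrade-cics 1 2             # transfer controller state from env 1 to seed env 2
./octane upgrade-db 1 2               # migrate and upgrade the state databases
./octane upgrade-ceph 1 2             # update the Ceph cluster configuration
./octane upgrade-node --isolated 2 5  # move node 5 to env 2 and upgrade it
./octane cleanup 2                    # delete data about the original env from the upgraded cloud
./octane cleanup-fuel                 # revert changes on the Fuel Master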

View File

@ -1,96 +0,0 @@
#!/bin/bash
set -ex
SRC=${4:-/etc/neutron}
TMPL=${3:-neutron-template}
TEMPLATE_FILE=../patches/neutron-template.tar
function log {
echo $* > /dev/stderr
}
function exit_error {
log "Error"
exit 1
}
function exit_success {
log "Success"
exit 0
}
function tmpl_var_names {
egrep -Rho '%[A-Z_]+%' $1 | sed -r ':a;N;$!ba;s/\n/\l|/g;s/^/^(/;s/$/)/' | sed 's/\(.*\)/\L\1/;s/%//g'
}
function tmpl_var_values {
sed -r 's/[ ]+?=[ ]+?/=/g' | awk -F= '/=/ {printf("s/%%%s%%/%s/g;\n", toupper($1), $2)}'
}
function prepare() {
local TMPL_DIR=$1
local SRC_DIR=$2
local OUTPUT_DIR="/tmp/neutron-$$"
log "Check source and template dirs"
test -d $SRC_DIR -a -d $TMPL_DIR
log "Generate variable names"
var_names=`tmpl_var_names $TMPL_DIR`
log "Get values from source dir"
var_values=`egrep -hR "$var_names" $SRC_DIR | tmpl_var_values`
cp -r $TMPL_DIR $OUTPUT_DIR
find $OUTPUT_DIR -type f | xargs -tI{} sed -ri'' "$var_values" {}
echo $OUTPUT_DIR
}
function install() {
local SRC_DIR=$1
local DST_DIR=$2
test -d $SRC_DIR -a -d $DST_DIR
test -z "$NEUTRON_BACKUP" && {
tar cvf /tmp/neutron-before-upgrade$$.tar $DST_DIR
}
rm -rf $DST_DIR
cp -vr $SRC_DIR $DST_DIR
test -f $DST_DIR/plugins/ml2/ml2_conf.ini
ln -s $DST_DIR/plugins/ml2/ml2_conf.ini $DST_DIR/plugin.ini
test -h $DST_DIR/plugin.ini
chown -R root:neutron $DST_DIR
}
function bootstrap() {
local NODE=$1
test -f $0 -a -f ${TEMPLATE_FILE}
scp $0 ${TEMPLATE_FILE} ${NODE}:
ssh ${NODE} "test -d neutron-template || mkdir neutron-template; tar xvf `basename $TEMPLATE_FILE` -C neutron-template"
}
trap exit_error EXIT
case "$1" in
prepare)
prepare $2 "/etc/neutron"
;;
install)
install $2 "/etc/neutron"
;;
bootstrap)
bootstrap $2
;;
*)
echo "Usage: $0 [prepare|install]"
exit 1
esac
trap exit_success EXIT
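To make the %VAR% substitution mechanism above concrete, here is a hypothetical example: suppose a file in the template directory contains the line "local_ip = %LOCAL_IP%" and the corresponding file under /etc/neutron contains "local_ip = 10.20.0.2" (both the option name and the value are made up). The tmpl_var_values filter then derives the sed expression applied to the copied template:
# feed one source config line through the tmpl_var_values pipeline
echo "local_ip = 10.20.0.2" \
    | sed -r 's/[ ]+?=[ ]+?/=/g' \
    | awk -F= '/=/ {printf("s/%%%s%%/%s/g;\n", toupper($1), $2)}'
# prints: s/%LOCAL_IP%/10.20.0.2/g;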

View File

@ -1,53 +0,0 @@
#!/bin/sh -e
extract_vars() {
sed -re '/^\+.*%.*/ s/.*%([^%]+)%.*/\L\1/;tx;d;:x' $1
}
convert_vars_to_regex() {
tr "\n" " "| sed -re 's,^,^(,;s,.$,),;s, ,|,g'
}
generate_template_regex() {
egrep "`extract_vars $1 | convert_vars_to_regex`" | awk -F= '{key = gensub(" ", "", "g", $1); printf("s|%%%s%%|%s|g;", toupper(key), $2)}'
}
upgrade_compute_service() {
local regex
local nova_regex
#regex=$(ssh $1 "find /etc/neutron -type f -exec cat {} \;" | generate_template_regex $PATCH)
./upgrade-neutron.sh bootstrap $1
local tmp_dir=`ssh $1 ./upgrade-neutron.sh prepare neutron-template`
if [ -z "$tmp_dir" ]; then
echo "Tmp dir err"
exit 1
fi
nova_regex=$(ssh $1 "cat /etc/nova/nova.conf" | generate_template_regex $NOVA_PATCH)
#sed -r "$regex" ${PATCH} | ssh $1 "tee /tmp/patch-neutron-config_$1.patch"
ssh $1 "apt-get update; apt-get install -o Dpkg::Options::='--force-confnew' --yes nova-compute"
#ssh $1 "cd /etc/neutron && patch -p0 < /tmp/patch-neutron-config_$1.patch"
cat ${NOVA_PATCH} | sed -r "${nova_regex}" | ssh $1 "cat > /etc/nova/nova.conf"
ssh $1 ./upgrade-neutron.sh install $tmp_dir
ssh $1 'restart nova-compute && ( stop neutron-plugin-openvswitch-agent; start neutron-plugin-openvswitch-agent )'
}
add_apt_sources() {
local source
source="http://$(grep fuel /etc/hosts | cut -d \ -f1):8080/2014.2-6.0/ubuntu/x86_64"
printf "\ndeb $source precise main\n" | ssh $1 "cat >> /etc/apt/sources.list"
}
[ -f "./functions" ] && . ./functions
[ -z "$1" ] && die "No node ID provided, exiting"
PATCH=${2-"../patches/neutron-upgrade.patch"}
NOVA_PATCH=${3-"../patches/nova.conf"}
if [ ! -f "$PATCH" -o ! -f "$NOVA_PATCH" ]; then
echo "Usage $0 NODE_ID [NEUTRON_PATCH_PATH] [NOVA_PATCH_PATH]" >> /dev/stderr
exit 1
fi
add_apt_sources $1
upgrade_compute_service $1

View File

@ -109,14 +109,13 @@ def install_node(orig_id, seed_id, node_ids, isolated=False, networks=None):
if networks:
env_util.clone_ips(orig_id, networks)
node_util.reboot_nodes(nodes)
LOG.info("Nodes reboot in progress. Please wait...")
node_util.reboot_nodes(nodes, timeout=180 * 60)
node_util.wait_for_mcollective_start(nodes)
env_util.provision_nodes(seed_env, nodes)
for node in nodes:
# FIXME: properly call all handlers all over the place
controller_upgrade.ControllerUpgrade(
node, seed_env, isolated=isolated).predeploy()
env_util.update_deployment_info(seed_env, isolated)
if isolated and len(nodes) > 1:
isolate(nodes, seed_env)

View File

@ -22,15 +22,23 @@ from octane.util import subprocess
def patch_puppet(revert=False):
direction = "-R" if revert else "-N"
puppet_patch_dir = os.path.join(magic_consts.CWD, "patches", "puppet")
for d in os.listdir(puppet_patch_dir):
d = os.path.join(puppet_patch_dir, d)
if not os.path.isdir(d):
continue
with open(os.path.join(d, "patch")) as patch:
subprocess.call(["patch", direction, "-p3"], stdin=patch,
cwd=magic_consts.PUPPET_DIR)
try:
subprocess.call(["patch", "-R", "-p3"], stdin=patch,
cwd=magic_consts.PUPPET_DIR)
except subprocess.CalledProcessError:
if not revert:
pass
else:
raise
if not revert:
subprocess.call(["patch", "-N", "-p3"], stdin=patch,
cwd=magic_consts.PUPPET_DIR)
def apply_patches(revert=False):

View File

@ -0,0 +1,49 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane.helpers import network
from octane.util import env as env_util
from octane.util import maintenance
def rollback_control_plane(seed_id, orig_id):
seed_env = environment_obj.Environment(seed_id)
orig_env = environment_obj.Environment(orig_id)
# switch physical networks connectivity to orig_env
roles = ['primary-controller', 'controller']
for node, info in env_util.iter_deployment_info(seed_env, roles):
network.delete_patch_ports(node, info)
for node, info in env_util.iter_deployment_info(orig_env, roles):
network.create_patch_ports(node, info)
# enable cluster's services for orig_env
maintenance.start_cluster(orig_env)
maintenance.start_corosync_services(orig_env)
maintenance.enable_apis(orig_env)
class RollbackControlPlaneCommand(cmd.Command):
"""Rollback control plane to the orig environment"""
def get_parser(self, prog_name):
parser = super(RollbackControlPlaneCommand, self).get_parser(prog_name)
parser.add_argument(
'seed_id', type=int, metavar='SEED_ID',
help="ID of seed environment")
parser.add_argument(
'orig_id', type=int, metavar='ORIG_ID',
help="ID of original environment")
return parser
def take_action(self, parsed_args):
rollback_control_plane(parsed_args.seed_id, parsed_args.orig_id)

View File

@ -12,7 +12,6 @@
import argparse
import logging
import pyzabbix
import re
import requests
@ -81,9 +80,9 @@ def get_zabbix_client(astute):
session.proxies = {
'http': 'http://{0}:8888'.format(node_ip)
}
import pyzabbix
client = pyzabbix.ZabbixAPI(server=url, session=session)
client.login(user=user, password=password)
return client

View File

@ -0,0 +1,168 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import itertools
import os
import re
import subprocess
import tarfile
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane import magic_consts
from octane.util import env as env_util
from octane.util import node as node_util
from octane.util import ssh
def short_hostname(hostname):
return hostname.partition('.')[0]
def remove_mask(ip_addr):
return ip_addr.partition('/')[0]
def replace_addresses(conf, hostnames, mgmt_ips):
mon_initial_members = ' '.join(hostnames)
mon_host = ' '.join(mgmt_ips)
conf = re.sub(r'\n(mon_initial_members\s+=\s+)[-.\w\s]*\n',
"\n\g<1>{0}\n".format(mon_initial_members),
conf)
conf = re.sub(r'\n(mon_host\s+=\s+)[-.\w\s]*\n',
"\n\g<1>{0}\n".format(mon_host),
conf)
return conf
def get_fsid(conf):
match = re.search(r'\nfsid\s+=\s+([-.\w]+)\s*\n', conf)
if match is not None:
return match.group(1)
def replace_host(conf, hostname):
conf = re.sub(r'\n(host\s+=\s+)[-.\w\s]*\n',
"\n\g<1>{0}\n".format(hostname),
conf)
return conf
def import_bootstrap_osd(node):
ssh.call(['ceph', 'auth', 'import', '-i',
'/root/ceph.bootstrap-osd.keyring'], node=node)
ssh.call(['ceph', 'auth', 'caps', 'client.bootstrap-osd', 'mon',
"'allow profile bootstrap-osd'"], node=node)
def get_ceph_conf_filename(node):
cmd = [
'bash', '-c',
'pgrep ceph-mon | xargs -I{} cat /proc/{}/cmdline',
]
cmdlines = ssh.call_output(cmd, node=node)
if cmdlines:
cmdline = cmdlines.split('\n')[0].split('\0')
for i, value in enumerate(cmdline):
if value == '-c' and i + 1 < len(cmdline):
return cmdline[i + 1]
return '/etc/ceph/ceph.conf'
def ceph_set_new_mons(seed_env, filename, conf_filename, db_path):
nodes = list(env_util.get_controllers(seed_env))
hostnames = map(short_hostname, node_util.get_hostnames(nodes))
mgmt_ips = map(remove_mask, node_util.get_ips('management', nodes))
with contextlib.closing(tarfile.open(filename)) as f:
conf = f.extractfile(conf_filename).read()
conf = replace_addresses(conf, hostnames, mgmt_ips)
fsid = get_fsid(conf)
monmaptool_cmd = ['monmaptool', '--fsid', fsid, '--clobber', '--create']
for node_hostname, node_ip in itertools.izip(hostnames, mgmt_ips):
monmaptool_cmd += ['--add', node_hostname, node_ip]
for node, node_hostname in itertools.izip(nodes, hostnames):
node_db_path = "/var/lib/ceph/mon/ceph-{0}".format(node_hostname)
node_conf = replace_host(conf, node_hostname)
try:
ssh.call(['stop', 'ceph-mon', "id={0}".format(node_hostname)],
node=node)
except subprocess.CalledProcessError:
pass
ssh.call(['rm', '-rf', node_db_path], node=node)
node_util.untar_files(filename, node)
sftp = ssh.sftp(node)
with sftp.open(conf_filename, 'w') as f:
f.write(node_conf)
ssh.call(['mv', db_path, node_db_path], node=node)
sysvinit = os.path.join(node_db_path, 'sysvinit')
try:
sftp.remove(sysvinit)
except IOError:
pass
upstart = os.path.join(node_db_path, 'upstart')
sftp.open(upstart, 'w').close()
with ssh.tempdir(node) as tempdir:
monmap_filename = os.path.join(tempdir, 'monmap')
ssh.call(monmaptool_cmd + [monmap_filename], node=node)
ssh.call(['ceph-mon', '-i', node_hostname, '--inject-monmap',
monmap_filename], node=node)
for node, node_hostname in itertools.izip(nodes, hostnames):
ssh.call(['start', 'ceph-mon', "id={0}".format(node_hostname)],
node=node)
import_bootstrap_osd(nodes[0])
def extract_mon_conf_files(orig_env, tar_filename):
controller = env_util.get_one_controller(orig_env)
conf_filename = get_ceph_conf_filename(controller)
conf_dir = os.path.dirname(conf_filename)
hostname = short_hostname(
node_util.get_hostname_remotely(controller))
db_path = "/var/lib/ceph/mon/ceph-{0}".format(hostname)
node_util.tar_files(tar_filename, controller, conf_dir, db_path)
return conf_filename, db_path
def upgrade_ceph(orig_id, seed_id):
orig_env = environment_obj.Environment(orig_id)
seed_env = environment_obj.Environment(seed_id)
tar_filename = os.path.join(magic_consts.FUEL_CACHE,
"env-{0}-ceph.conf.tar.gz".format(orig_id))
conf_filename, db_path = extract_mon_conf_files(orig_env, tar_filename)
ceph_set_new_mons(seed_env, tar_filename, conf_filename, db_path)
class UpgradeCephCommand(cmd.Command):
"""update Ceph cluster configuration."""
def get_parser(self, prog_name):
parser = super(UpgradeCephCommand, self).get_parser(prog_name)
parser.add_argument(
'orig_id', type=int, metavar='ORIG_ID',
help="ID of original environment")
parser.add_argument(
'seed_id', type=int, metavar='SEED_ID',
help="ID of seed environment")
return parser
def take_action(self, parsed_args):
upgrade_ceph(parsed_args.orig_id, parsed_args.seed_id)

View File

@ -9,45 +9,15 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import yaml
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane.helpers import network
from octane import magic_consts
from octane.util import env as env_util
from octane.util import maintenance
from octane.util import ssh
def disconnect_networks(env):
controllers = list(env_util.get_controllers(env))
for node in controllers:
deployment_info = env_util.get_astute_yaml(env, node)
network.delete_patch_ports(node, deployment_info)
def connect_to_networks(env):
deployment_info = []
controllers = list(env_util.get_controllers(env))
backup_path = os.path.join(magic_consts.FUEL_CACHE,
'deployment_{0}.orig'
.format(env.id))
for filename in os.listdir(backup_path):
filepath = os.path.join(backup_path, filename)
with open(filepath) as info_file:
info = yaml.safe_load(info_file)
deployment_info.append(info)
for node in controllers:
for info in deployment_info:
if (info['role'] in ('primary-controller', 'controller')
and info['uid'] == str(node.id)):
network.delete_overlay_networks(node, info)
network.create_patch_ports(node, info)
def update_neutron_config(orig_env, seed_env):
controllers = list(env_util.get_controllers(seed_env))
tenant_id = env_util.cache_service_tenant_id(orig_env)
@ -61,11 +31,30 @@ def update_neutron_config(orig_env, seed_env):
def upgrade_control_plane(orig_id, seed_id):
orig_env = environment_obj.Environment(orig_id)
seed_env = environment_obj.Environment(seed_id)
controllers = list(env_util.get_controllers(seed_env))
update_neutron_config(orig_env, seed_env)
maintenance.start_corosync_services(seed_env)
maintenance.start_upstart_services(seed_env)
disconnect_networks(orig_env)
connect_to_networks(seed_env)
# enable all services on seed env
if len(controllers) > 1:
maintenance.stop_cluster(seed_env)
else:
maintenance.start_corosync_services(seed_env)
maintenance.start_upstart_services(seed_env)
# disable cluster services on orig env
maintenance.stop_cluster(orig_env)
# switch networks to seed env
roles = ['primary-controller', 'controller']
# disable physical connectivity for orig env
for node, info in env_util.iter_deployment_info(orig_env, roles):
network.delete_patch_ports(node, info)
# enable physical connectivity for seed env
for node, info in env_util.iter_deployment_info(seed_env, roles):
network.delete_overlay_networks(node, info)
network.create_patch_ports(node, info)
# enable all services on seed env
if len(controllers) > 1:
maintenance.start_cluster(seed_env)
maintenance.start_corosync_services(seed_env)
maintenance.start_upstart_services(seed_env)
class UpgradeControlPlaneCommand(cmd.Command):

View File

@ -55,7 +55,10 @@ def upgrade_node(env_id, node_ids, isolated=False, network_template=None):
call_handlers('predeploy')
if network_template:
env_util.set_network_template(env, network_template)
env_util.deploy_nodes(env, nodes)
if isolated or len(nodes) == 1:
env_util.deploy_nodes(env, nodes)
else:
env_util.deploy_changes(env, nodes)
call_handlers('postdeploy')

View File

@ -16,6 +16,7 @@ from octane import handlers
class UpgradeHandler(object):
def __init__(self, node, env, isolated):
self.node = node
self.orig_env = self.node.env
self.env = env
self.isolated = isolated

View File

@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from octane.commands import prepare
from octane.handlers import upgrade
from octane.util import ceph
from octane.util import node as node_util
@ -22,9 +23,11 @@ class CephOsdUpgrade(upgrade.UpgradeHandler):
def prepare(self):
self.preserve_partition()
ceph.set_osd_noout(self.env)
prepare.patch_puppet()
def postdeploy(self):
ceph.unset_osd_noout(self.env)
prepare.patch_puppet(revert=True)
def preserve_partition(self):
partition = 'ceph'

View File

@ -10,8 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os.path
import stat
import subprocess
from octane.handlers import upgrade
from octane.helpers import disk
@ -21,6 +23,8 @@ from octane.util import node as node_util
from octane.util import plugin
from octane.util import ssh
LOG = logging.getLogger(__name__)
class ComputeUpgrade(upgrade.UpgradeHandler):
def prepare(self):
@ -34,12 +38,36 @@ class ComputeUpgrade(upgrade.UpgradeHandler):
def postdeploy(self):
self.restore_iscsi_initiator_info()
controller = env_util.get_one_controller(self.env)
ssh.call(
["sh", "-c", ". /root/openrc; "
"nova service-enable node-{0} nova-compute".format(
self.node.data['id'])],
node=controller,
)
# FIXME: Properly handle the case when a node does not have its
# full name in the services data
try:
ssh.call(
["sh", "-c", ". /root/openrc; "
"nova service-enable {0} nova-compute".format(
self.node.data['fqdn'])],
node=controller,
)
except subprocess.CalledProcessError as exc:
LOG.warn("Cannot start service 'nova-compute' on {0} "
"by reason: {1}. Try again".format(
self.node.data['fqdn'], exc))
ssh.call(
["sh", "-c", ". /root/openrc; "
"nova service-enable {0} nova-compute".format(
self.node.data['fqdn'].split('.', 1)[0])],
node=controller,
)
sftp = ssh.sftp(self.node)
if self.orig_env.data["fuel_version"] == "6.1":
with ssh.update_file(sftp, '/etc/nova/nova.conf') as (old, new):
for line in old:
new.write(line)
if line.startswith("[upgrade_levels]"):
new.write("compute=juno\n")
ssh.call(["service", "nova-compute", "restart"], node=self.node)
def evacuate_host(self):
controller = env_util.get_one_controller(self.env)

View File

@ -37,7 +37,11 @@ class ControllerUpgrade(upgrade.UpgradeHandler):
self.env, self.node)
def predeploy(self):
deployment_info = env_util.merge_deployment_info(self.env)
default_info = self.env.get_default_facts('deployment')
deployment_info = env_util.get_deployment_info(self.env)
network_data = self.env.get_network_data()
gw_admin = transformations.get_network_gw(network_data,
"fuelweb_admin")
if self.isolated:
# From backup_deployment_info
backup_path = os.path.join(
@ -47,7 +51,7 @@ class ControllerUpgrade(upgrade.UpgradeHandler):
if not os.path.exists(backup_path):
os.makedirs(backup_path)
# Roughly taken from Environment.write_facts_to_dir
for info in deployment_info:
for info in default_info:
if not info['uid'] == str(self.node.id):
continue
fname = os.path.join(
@ -56,17 +60,20 @@ class ControllerUpgrade(upgrade.UpgradeHandler):
)
with open(fname, 'w') as f:
yaml.safe_dump(info, f, default_flow_style=False)
for info in deployment_info:
if not info['uid'] == str(self.node.id):
for info in default_info:
if not (info['role'] == 'primary-controller' or
info['uid'] == str(self.node.id)):
continue
if self.isolated:
transformations.remove_ports(info)
endpoints = deployment_info[0]["network_scheme"]["endpoints"]
self.gateway = endpoints["br-ex"]["gateway"]
transformations.reset_gw_admin(info)
if info['uid'] == str(self.node.id):
endpoints = info["network_scheme"]["endpoints"]
self.gateway = endpoints["br-ex"]["gateway"]
transformations.reset_gw_admin(info, gw_admin)
# From run_ping_checker
info['run_ping_checker'] = False
transformations.remove_predefined_nets(info)
deployment_info.append(info)
self.env.upload_facts('deployment', deployment_info)
tasks = self.env.get_deployment_tasks()
@ -83,6 +90,23 @@ class ControllerUpgrade(upgrade.UpgradeHandler):
self.service_tenant_id))
else:
new.write(line)
if self.orig_env.data["fuel_version"] == "6.1":
with ssh.update_file(sftp, '/etc/nova/nova.conf') as (old, new):
for line in old:
new.write(line)
if line.startswith("[upgrade_levels]"):
new.write("compute=juno\n")
nova_services = ssh.call_output(
["bash", "-c",
"initctl list | "
"awk '/nova/ && /start/ {print $1}' | tr '\n' ' '"],
node=self.node
)
for nova_service in nova_services.split():
ssh.call(["service", nova_service, "restart"], node=self.node)
ssh.call(['restart', 'neutron-server'], node=self.node)
if self.isolated and self.gateway:
# From restore_default_gateway

View File

@ -238,12 +238,14 @@ def delete_overlay_networks(node, host_config):
def delete_port_ovs(bridge, port):
bridges = port['bridges']
port_name = "%s--%s" % (bridges[0], bridges[1])
return ['ovs-vsctl', 'del-port', bridges[0], port_name]
port1_name = "%s--%s" % (bridges[0], bridges[1])
port2_name = "%s--%s" % (bridges[1], bridges[0])
return [['ovs-vsctl', 'del-port', bridges[0], port1_name],
['ovs-vsctl', 'del-port', bridges[1], port2_name]]
def delete_port_lnx(bridge, port):
return ['brctl', 'delif', bridge, port['name']]
return [['brctl', 'delif', bridge, port['name']]]
delete_port_providers = {
@ -256,8 +258,9 @@ def delete_patch_ports(node, host_config):
for bridge in magic_consts.BRIDGES:
port, provider = ts.get_patch_port_action(host_config, bridge)
delete_port_cmd = delete_port_providers[provider]
cmd = delete_port_cmd(bridge, port)
ssh.call(cmd, node=node)
cmds = delete_port_cmd(bridge, port)
for cmd in cmds:
ssh.call(cmd, node=node)
def create_port_ovs(bridge, port):
@ -273,7 +276,7 @@ def create_port_ovs(bridge, port):
return cmd
cmds = []
tags = port.get('vlan_ids', ['', ''])
tags = port.get('vlan_ids') or port.get('tags', ['', ''])
trunks = port.get('trunks', [])
bridges = port.get('bridges', [])
bridge_index = bridges.index(bridge)
@ -282,7 +285,7 @@ def create_port_ovs(bridge, port):
tag = tags[index]
tags[index] = "tag=%s" % (str(tag),) if tag else ''
trunk = ''
trunk_str = ','.join(trunks)
trunk_str = ','.join(map(str, trunks))
if trunk_str:
trunk = 'trunks=[%s]' % (trunk_str,)
if bridges:

View File

@ -99,6 +99,14 @@ def remove_predefined_nets(host_config):
return host_config
def get_network_gw(data, network_name):
for net in data['networks']:
if net['name'] == network_name:
return net.get('gateway')
else:
return None
def reset_gw_admin(host_config, gateway=None):
if gateway:
gw = gateway

View File

@ -1,167 +0,0 @@
#!/bin/bash -xe
SSH_ARGS="-o LogLevel=quiet"
MON_STATE_PATH=/var/lib/ceph/mon
extract_ceph_conf() {
sed -nr 's/.*-c ([^ ]+).*/\1/gp'
}
ceph_get_conf_dir() {
[ -z "$1" ] && die "no CIC node ID provided in args, exiting"
local ceph_args=$(ssh $SSH_ARGS root@$(get_host_ip_by_node_id $1) \
"pgrep 'ceph-mon' | xargs ps -fp | grep -m1 '^root '")
test -z "$ceph_args" &&
die "no ceph-mon process on node $1"
local config_path=$(echo $ceph_args | extract_ceph_conf)
config_path=${config_path:-/etc/ceph/ceph.conf}
# test -z "$config_path" &&
# die "Could not extract config_path from $ceph_args on node $1"
# we assume that Ceph keyrings are placed in the same directory as ceph.conf
export CEPH_CONF_DIR=$(dirname $config_path)
}
ceph_extract_conf() {
[ -z "$1" ] && die "No 5.1.1 env ID provided as an arg, exiting"
check_env_exists $1 ||
die "Env $1 not found"
export CEPH_CONF_SRC_NODE=$(list_nodes $1 "controller" | head -1)
test -z "$CEPH_CONF_SRC_NODE" &&
die "No controllers found in Env $1"
local controller1_hostname=$(ssh $SSH_ARGS \
root@$(get_host_ip_by_node_id ${CEPH_CONF_SRC_NODE#node-}) hostname \
| cut -d. -f1)
local controller1_db_path=${MON_STATE_PATH}/ceph-${controller1_hostname}
ssh $SSH_ARGS $(get_host_ip_by_node_id ${CEPH_CONF_SRC_NODE#node-}) \
test -d $controller1_db_path ||
die "$controller1_db_path not found at $CEPH_CONF_SRC_NODE"
ceph_get_conf_dir ${CEPH_CONF_SRC_NODE#node-}
test -z "$CEPH_CONF_DIR" &&
die "Cannot find Ceph conf dir on $CEPH_CONF_SRC_NODE, exiting"
ssh $SSH_ARGS root@$(get_host_ip_by_node_id ${CEPH_CONF_SRC_NODE#node-}) \
"tar cvf - $CEPH_CONF_DIR $controller1_db_path | gzip" \
| cat - > ${FUEL_CACHE}/env-$1-ceph.conf.tar.gz
}
ceph_set_new_mons() {
[ -z "$1" ] && die "No 5.1.1 env ID provided as an arg, exiting"
[ -z "$2" ] && die "no 6.0 env ID provided as an arg, exiting"
for env in "$@"; do
check_env_exists $env ||
die "Env $env not found"
done
local controller1=$(list_nodes $1 "controller" | head -1)
test -z "$controller1" &&
die "No controllers found in Env $1"
local controllers=$(list_nodes $2 "controller")
test -z "$controllers" &&
die "No controllers found in Env $1"
local controllers_hostnames=$(echo -n $controllers | xargs -I{} \
ssh $SSH_ARGS root@{} hostname | cut -d. -f1)
local source_controllers=$(ssh $SSH_ARGS root@$controller1 \
cat ${CEPH_CONF_DIR}/ceph.conf \
| awk -F= '$1 ~ /mon_host/ {print gensub("^ ", "", "", $2)}')
local source_controllers_mask=$(echo ${source_controllers} | sed 's/ /|/g')
# init global vars for Ceph config values
export MON_INITIAL_MEMBERS=""
export MON_HOSTS=""
# collect available destination controllers
for ctrl_host in ${controllers}; do
ip_match=`ssh $SSH_ARGS $ctrl_host ip addr \
| grep -m1 -E "${source_controllers_mask}" \
| sed -r 's/[ ]+?inet ([^\/]+).*/\1/'`
test -z "$ip_match" && continue
export MON_INITIAL_MEMBERS="$MON_INITIAL_MEMBERS `ssh $SSH_ARGS $ctrl_host hostname | cut -d. -f1`"
export MON_HOSTS="$MON_HOSTS $ip_match"
done
}
ceph_push_update_conf() {
[ -z "$1" ] && die "no 6.0 env ID provided as an arg, exiting"
local dst_base_dir=""
local ctrl_host_db_path
local controller1_db_path=${MON_STATE_PATH}/ceph-${CEPH_CONF_SRC_NODE}
local ceph_conf_dir
local orig_env=$(get_env_by_node ${CEPH_CONF_SRC_NODE#node-})
for ctrl_host in ${MON_INITIAL_MEMBERS}; do
ctrl_host_db_path="${MON_STATE_PATH}/ceph-${ctrl_host}"
ceph_get_conf_dir ${ctrl_host#node-}
ssh $SSH_ARGS root@$(get_host_ip_by_node_id ${ctrl_host#node-}) \
"rm -rf $CEPH_CONF_DIR;
mkdir $CEPH_CONF_DIR;
test -d $ctrl_host_db_path && rm -rf $ctrl_host_db_path;
:"
cat ${FUEL_CACHE}/env-${orig_env}-ceph.conf.tar.gz \
| ssh $SSH_ARGS $ctrl_host "gunzip | tar xvf - -C /"
ssh $SSH_ARGS root@$(get_host_ip_by_node_id ${ctrl_host#node-}) "
set -ex
mv $controller1_db_path $ctrl_host_db_path
rm $ctrl_host_db_path/sysvinit || echo "File sysvinit not found"
touch $ctrl_host_db_path/upstart
sed -i'' 's/^mon_initial_members =.*/mon_initial_members =$MON_INITIAL_MEMBERS/g;
s/^mon_host =.*/mon_host =$MON_HOSTS/g;
s/^host =.*/host = ${ctrl_host}/g' ${CEPH_CONF_DIR}/ceph.conf
cat ${CEPH_CONF_DIR}/ceph.conf | awk -F= '
\$1 ~ /^fsid/ {
fsid = \$2
}
\$1 ~ /^mon_initial_members/ {
split(\$2, members, \" \")
}
\$1 ~ /^mon_host/ {
split(\$2, host, \" \")
}
END {
printf(\"monmaptool --fsid %s --clobber --create \", fsid)
for (i in members) {
printf(\" --add %s %s\", members[i], host[i]);
}
printf(\" /tmp/monmap\n\")
}' | sh -
ceph-mon -i ${ctrl_host} --inject-monmap /tmp/monmap
"
done
for ctrl_host in "${MON_INITIAL_MEMBERS# }"; do
ssh root@$ctrl_host "restart ceph-mon id=$ctrl_host"
done
}
import_bootstrap_osd() {
local node
[ -z "$1" ] && die "No env ID provided, exiting"
node=$(list_nodes $1 controller | head -1)
ssh root@$(get_host_ip_by_node_id ${node#node-}) \
"ceph auth import -i /root/ceph.bootstrap-osd.keyring;
ceph auth caps client.bootstrap-osd mon 'allow profile bootstrap-osd'"
}
prepare_ceph_osd_upgrade() {
local seed_id
local nodes
local node
[ -z "${seed_id:=$1}" ] && die "No 6.0 env ID provided, exiting"
nodes=$(list_nodes $seed_id '(controller)')
for node in $nodes
do
ssh root@$node sh -c "'
f=\$(mktemp)
awk -f /dev/stdin /etc/ceph/ceph.conf > \$f
chmod 644 \$f && mv \$f /etc/ceph/ceph.conf
'" <<EOF
BEGIN {
flag = 0
}
/^$|^\[/ && flag == 1 {
flag = 0;
print "osd_crush_update_on_start = false"
}
/^\[global\]$/ {
flag = 1
}
{ print \$0 }
EOF
done
}

View File

@ -1,133 +0,0 @@
#!/bin/bash
pycmd() {
if ! python -c 'import octane'; then
yum install -y python-paramiko
pip install --no-index -e "$CWD/.." ||
die "Cannot install octane, exiting"
fi
local opts=""
if shopt -qo xtrace; then
opts="--debug -v"
fi
octane $opts "$@"
exit $?
}
check_deployment_status() {
# Verify operational status of environment.
[ -z "$1" ] && die "No env ID provided, exiting"
local status=$(fuel env --env $1 \
| awk -F"|" '/^'$1'/{print $2}' \
| tr -d ' ')
[ "$status" == 'new' ] || die "Environment is not operational, exiting"
}
list_ports() {
# On the host identified by the first argument, list the ports in the bridge
# identified by the second argument.
[ -z "$1" ] && die "No hostname and bridge name provided, exiting"
[ -z "$2" ] && die "No bridge name provided, exiting"
echo -n "$(ssh root@$1 ovs-vsctl list-ports $2)"
}
create_patch_ports() {
# Create patch interfaces to connect the logical bridge for the Public or
# Management network to the physical bridge for that network (example
# commands follow this function).
local node
[ -z "$1" ] && die "No env ID provided, exiting"
[ -d ${FUEL_CACHE}/deployment_$1.orig ] || die "Deployment information not found for env $1, exiting"
local br_name=$2
local nodes=$(list_nodes $1 'controller')
for node in $nodes
do
local filename=$(ls ${FUEL_CACHE}/deployment_$1.orig/*_${node#node-}.yaml \
| head -1)
${BINPATH}/create-controller-ports $filename $br_name \
| xargs -I {} ssh root@$node {}
done
}
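For illustration, the per-node commands that create-controller-ports generates and which are executed over ssh look roughly like the following pair of patch ports for br-ex. The bridge names br-ex and br-ovs-bond1 come from the test data elsewhere in this change and are only illustrative.
ovs-vsctl add-port br-ex br-ex--br-ovs-bond1 trunks=[0] -- \
    set interface br-ex--br-ovs-bond1 type=patch options:peer=br-ovs-bond1--br-ex
ovs-vsctl add-port br-ovs-bond1 br-ovs-bond1--br-ex trunks=[0] -- \
    set interface br-ovs-bond1--br-ex type=patch options:peer=br-ex--br-ovs-bond1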
delete_patch_ports() {
local br_name
local ph_name
local node_ids
local node_id
local node
[ -z "$1" ] && die "No env ID and bridge name provided, exiting"
[ -z "$2" ] && die "No bridge name provided, exiting"
br_name=$2
for node in $(list_nodes $1 controller)
do
ph_name=$(list_ports $node $br_name \
| tr -d '"' \
| sed -nre 's/'$br_name'--(.*)/\1/p')
ssh root@${node} ovs-vsctl del-port $br_name ${br_name}--${ph_name}
ssh root@${node} ovs-vsctl del-port $ph_name ${ph_name}--${br_name}
done
}
upgrade_cics() {
[ -z "$1" ] && die "No 5.1.1 env ID provided, exiting"
[ -z "$2" ] && die "No 6.0 env ID provided, exiting"
check_deployment_status $2
set_pssh_hosts $1 && {
enable_apis
} && unset PSSH_RUN
set_pssh_hosts $2 && {
start_corosync_services
start_upstart_services
} && unset PSSH_RUN
for br_name in br-ex br-mgmt br-prv;
do
delete_patch_ports $1 $br_name
done
for br_name in br-ex br-mgmt;
do
create_patch_ports $2 $br_name
done
list_nodes $1 compute | xargs -I{} ${BINPATH}/upgrade-nova-compute.sh {}
}
upgrade_ceph() {
[ -z "$1" ] && die "No 5.1 and 6.0 env IDs provided, exiting"
[ -z "$2" ] && die "No 6.0 env ID provided, exiting"
ceph_extract_conf $1
ceph_set_new_mons "$@"
ceph_push_update_conf $2
import_bootstrap_osd $2
prepare_ceph_osd_upgrade $2
}
cleanup_nova_services() {
[ -z "$1" ] && die "No 6.0 env ID provided, exiting"
local cic=$(list_nodes $1 controller | head -1)
ssh root@${cic} '. /root/openrc;
nova service-list | grep nova \
| grep -Ev "('$(list_nodes $1 "(controller|compute|ceph-osd)" \
| sed ':a;N;$!ba;s/\n/|/g')')"' | awk -F \| '{print($2)}' | tr -d ' ' \
| xargs -I{} ssh root@${cic} ". /root/openrc; nova service-delete {}"
}
cleanup_neutron_services() {
[ -z "$1" ] && die "No 6.0 env ID provided, exiting"
local cic=$(list_nodes $1 controller | head -1)
ssh root@${cic} '. /root/openrc;
neutron agent-list | grep neutron \
| grep -Ev "('$(list_nodes $1 "(controller|compute|ceph-osd)" \
| sed ':a;N;$!ba;s/\n/|/g')')"' | awk -F \| '{print($2)}' | tr -d ' ' \
| xargs -I{} ssh root@${cic} ". /root/openrc; neutron agent-delete {}"
}
delete_fuel_resources() {
[ -z "$1" ] && die "No env ID provided, exiting"
local node=$(list_nodes $1 controller | head -1)
local host=$(get_host_ip_by_node_id ${node#node-})
scp $HELPER_PATH/delete_fuel_resources.py root@$host:/tmp
ssh root@$host ". openrc; python /tmp/delete_fuel_resources.py"
}
cleanup_fuel() {
revert_prepare_fuel
}

View File

@ -1,38 +0,0 @@
#!/bin/bash
export SVC_LIST="/root/services_list"
export SVC_LIST_TMP="${SVC_LIST}.tmp"
enable_apis() {
$PSSH_RUN "sed -i '/use_backend maintenance if TRUE/d' \
\$(grep -L 'mode *tcp' /etc/haproxy/conf.d/*)"
$PSSH_RUN "pkill haproxy"
}
start_corosync_services() {
$PSSH_RUN "pcs resource \
| awk '/Clone Set:/ {print \$4; getline; print \$1}' \
| sed 'N;s/\n/ /' | tr -d :[] \
| grep Stopped | awk '{print \$1}' \
| xargs -I@ sh -c \"crm resource start @\""
}
start_upstart_services() {
local command=$(cat <<EOF
crm_services=\$(pcs resource \
| awk '/Clone Set:/ {print \$4; getline; print \$1}' \
| sed 'N;s/\n/ /' \
| tr -d ':[]' | awk '{print substr(\$1,3)}');
for s in \$(<${SVC_LIST});
do
for cs in \$crm_services; do
if [ "\$cs" == "\$s" ]; then
continue 2;
fi;
done;
start \$s;
done;
EOF
)
$PSSH_RUN "$command"
}

View File

@ -1,16 +0,0 @@
#!/bin/bash -xe
run=".state"
[ -d "$run" ] || mkdir -p "$run"
patch_fuel_components() {
local cmp
[ -z "$1" ] && die "No component name provided, exiting"
for cmp in "$@";
do
[ -d "$PATCH_DIR/$cmp" ] || die "No dir for component $cmp, exiting"
pushd "$PATCH_DIR/$cmp"
[ -x "./update.sh" ] && ./update.sh
popd
done
}

View File

@ -1,33 +0,0 @@
# vim: syntax=sh
REVERT_PATH="$(readlink -e "$BASH_SOURCE")"
OCTANE_PATH="$(readlink -e "$(dirname "$REVERT_PATH")/..")"
## functions
revert_prepare_fuel () {
revert_patch_fuel_components puppet
revert_all_patches
}
revert_deployment_tasks() {
[ -z "$1" ] && die "No environment ID provided, exiting"
[ -d "$FUEL_CACHE" ] &&
[ -d "${FUEL_CACHE}/cluster_$1" ] &&
cp -pR "${FUEL_CACHE}/cluster_$1.orig" "${FUEL_CACHE}/cluster_$1"
}
revert_patch_fuel_components() {
local cmp
[ -z "$1" ] && die "No component name provided, exiting"
for cmp in "$@";
do
[ -d "$PATCH_DIR/$cmp" ] || die "No dir for component $cmp, exiting"
pushd "$PATCH_DIR/$cmp"
[ -x "./revert.sh" ] && ./revert.sh
popd
done
}
function revert_all_patches() {
PATCH_EXTRA_ARGS="-R" patch_all_containers
}

View File

@ -1,51 +0,0 @@
#!/bin/bash
yell() {
echo "$*" >&2
}
die() {
yell "${FUNCNAME[1]}: ${1}"
exit ${2:-1}
}
check_env_exists() {
[ -z "$1" ] && die "No environment ID provided, exiting"
local env_id=$1
fuel env --env-id $env_id | grep -qE "$env_id[ ]+?\|"
}
set_pssh_hosts() {
[ -z "$1" ] && die "No environment ID provided, exiting"
PSSH_RUN="pssh -i"
for node in $(list_nodes $1 ${2:-controller});
do
PSSH_RUN+=" -H $node"
done
}
get_env_by_node() {
[ -z "$1" ] && die "No node ID provided, exiting"
echo "$(fuel node --node $1 \
| awk -F\| '/^'$1'/ {gsub(" ", "", $4); print $4}')"
}
get_host_ip_by_node_id() {
[ -z "$1" ] && die "No node ID provided, exiting"
echo $(fuel node | awk -F"|" '/^'$1' /{print($5)}' | tr -d ' ')
}
get_node_online() {
[ -z "$1" ] && die "No node ID provided, exiting"
fuel node --node "$1" | tail -1 | awk -F\| '{gsub(" ", "", $9);print($9)}'
}
list_nodes() {
local roles_re
[ -z "$1" ] && die "No env ID provided, exiting"
roles_re=${2:-controller}
echo "$(fuel node --env $1 \
| awk -F\| '($7 ~ /'$roles_re'/ || $8 ~ /'$roles_re'/) && $2 ~ /'$3'/ {
gsub(" ","",$1); print "node-" $1
}')"
}

View File

@ -17,8 +17,8 @@ PATCHES = [("nailgun", "/usr/lib/python2.6/site-packages/nailgun/extensions"
"/cluster_upgrade/", "patches/nailgun-clone-ips.patch")]
# TODO: use pkg_resources for patches
CWD = os.path.dirname(__file__) # FIXME
FUEL_CACHE = "/tmp/octane/deployment" # TODO: we shouldn't need this
PUPPET_DIR = "/etc/puppet/2014.2.2-6.1/modules"
FUEL_CACHE = "/tmp" # TODO: we shouldn't need this
PUPPET_DIR = "/etc/puppet/2015.1.0-7.0/modules"
BOOTSTRAP_INITRAMFS = "/var/www/nailgun/bootstrap/initramfs.img"
SSH_KEYS = ['/root/.ssh/id_rsa', '/root/.ssh/bootstrap.rsa']

View File

@ -0,0 +1,134 @@
commit 58895ee6973857fa8b4ee811e7dfa5005ae22aa1 (HEAD, puppet-tests/review/oleg_gelbukh/bp/partition-preservation, review/oleg_gelbukh/bp/partition-preservation)
Author: Oleg Gelbukh <ogelbukh@mirantis.com>
Date: Mon Jul 20 13:29:09 2015 +0000
Support Ceph OSD devices with existing data set
The partition preservation feature keeps data on a Ceph OSD device intact, but
later Puppet runs 'ceph-deploy prepare' on every Ceph device in the system,
which destroys the data set on those devices.
To preserve data on Ceph OSD devices through the deployment process, we need to
check whether a device already holds Ceph data and, if so, skip running
'ceph-deploy prepare' on it. Only prepared devices must be activated to avoid
a deployment failure.
It was noted that bug #1474510 causes the ceph-osd service to start improperly,
preventing the ceph-all init script from activating all existing OSD devices
on boot.
Change-Id: I667fa6aab9d6f46c73bfb8ca0e267afede6049fb
Implements: blueprint partition-preservation
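Expressed as shell, the guard this patch adds around 'ceph-deploy osd prepare' amounts to the following sketch; the device /dev/sdb1 and host node-1 are placeholders.
# skip preparation when the device already carries Ceph data
if ceph-disk list | fgrep -q -e '/dev/sdb1 ceph data, active' -e '/dev/sdb1 ceph data, prepared'; then
    echo "/dev/sdb1 already prepared or active, skipping prepare"
else
    ceph-deploy osd prepare node-1:/dev/sdb1
fi
# only devices reported as prepared are then activated
ceph-disk list | fgrep -q -e '/dev/sdb1 ceph data, prepared' \
    && ceph-deploy osd activate node-1:/dev/sdb1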
diff --git a/deployment/puppet/ceph/manifests/osds.pp b/deployment/puppet/ceph/manifests/osds.pp
index 3281415..a872bb8 100644
--- a/deployment/puppet/ceph/manifests/osds.pp
+++ b/deployment/puppet/ceph/manifests/osds.pp
@@ -1,4 +1,12 @@
-# prepare and bring online the devices listed in $::ceph::osd_devices
+# == Class: ceph::osd
+#
+# Prepare and bring online the OSD devices
+#
+# ==== Parameters
+#
+# [*devices*]
+# (optional) Array. This is the list of OSD devices identified by the facter.
+#
class ceph::osds (
$devices = $::ceph::osd_devices,
){
diff --git a/deployment/puppet/ceph/manifests/osds/osd.pp b/deployment/puppet/ceph/manifests/osds/osd.pp
index b8fd18e..153b84d 100644
--- a/deployment/puppet/ceph/manifests/osds/osd.pp
+++ b/deployment/puppet/ceph/manifests/osds/osd.pp
@@ -1,3 +1,7 @@
+# == Define: ceph::osds::osd
+#
+# Prepare and activate OSD nodes on the node
+#
define ceph::osds::osd () {
# ${name} format is DISK[:JOURNAL]
@@ -18,8 +22,8 @@ define ceph::osds::osd () {
tries => 2, # This is necessary because of race for mon creating keys
try_sleep => 1,
logoutput => true,
- unless => "grep -q ${data_device_name} /proc/mounts",
- } ->
+ unless => "ceph-disk list | fgrep -q -e '${data_device_name} ceph data, active' -e '${data_device_name} ceph data, prepared'",
+ } -> Exec["ceph-deploy osd activate ${deploy_device_name}"]
exec { "ceph-deploy osd activate ${deploy_device_name}":
command => "ceph-deploy osd activate ${deploy_device_name}",
@@ -27,7 +31,7 @@ define ceph::osds::osd () {
tries => 3,
logoutput => true,
timeout => 0,
- unless => "ceph osd dump | grep -q \"osd.$(sed -nEe 's|${data_device_name}\\ .*ceph-([0-9]+).*$|\\1|p' /proc/mounts)\\ up\\ .*\\ in\\ \"",
+ onlyif => "ceph-disk list | fgrep -q -e '${data_device_name} ceph data, prepared'",
}
}
diff --git a/deployment/puppet/ceph/spec/classes/osds__spec.rb b/deployment/puppet/ceph/spec/classes/osds__spec.rb
index b4b7c1b..157bcea 100644
--- a/deployment/puppet/ceph/spec/classes/osds__spec.rb
+++ b/deployment/puppet/ceph/spec/classes/osds__spec.rb
@@ -19,7 +19,7 @@ describe 'ceph::osds', :type => :class do
end
context 'Class ceph::osds with devices and journals' do
- let (:params) {{ :devices => ['/dev/sdc1:/dev/sdc2', '/dev/sdd1:/dev/sdd2' ] }}
+ let (:params) {{ :devices => ['/dev/sdc1:/dev/sdc2', '/dev/sdd1:/dev/sdd2'] }}
it { should contain_firewall('011 ceph-osd allow') }
it { should contain_ceph__osds__osd('/dev/sdc1:/dev/sdc2') }
diff --git a/deployment/puppet/ceph/spec/defines/osd__spec.rb b/deployment/puppet/ceph/spec/defines/osd__spec.rb
index b510da3..9c54569 100644
--- a/deployment/puppet/ceph/spec/defines/osd__spec.rb
+++ b/deployment/puppet/ceph/spec/defines/osd__spec.rb
@@ -2,7 +2,7 @@ require 'spec_helper'
describe 'ceph::osds::osd', :type => :define do
let :facts do
- { :hostname => 'test.example', }
+ { :hostname => 'test.example' }
end
context 'Simple test' do
@@ -15,7 +15,7 @@ describe 'ceph::osds::osd', :type => :define do
'tries' => 2,
'try_sleep' => 1,
'logoutput' => true,
- 'unless' => "grep -q /dev/svv /proc/mounts",
+ 'unless' => "ceph-disk list | fgrep -q -e '/dev/svv ceph data, active' -e '/dev/svv ceph data, prepared'",
)
}
it { should contain_exec("ceph-deploy osd activate test.example:/dev/svv").with(
@@ -24,7 +24,7 @@ describe 'ceph::osds::osd', :type => :define do
'tries' => 3,
'logoutput' => true,
'timeout' => 0,
- 'unless' => "ceph osd dump | grep -q \"osd.$(sed -nEe 's|/dev/svv\\ .*ceph-([0-9]+).*$|\\1|p' /proc/mounts)\\ up\\ .*\\ in\\ \"",
+ 'onlyif' => "ceph-disk list | fgrep -q -e '/dev/svv ceph data, prepared'",
)
}
end
@@ -38,7 +38,7 @@ describe 'ceph::osds::osd', :type => :define do
'tries' => 2,
'try_sleep' => 1,
'logoutput' => true,
- 'unless' => "grep -q /dev/sdd /proc/mounts",
+ 'unless' => "ceph-disk list | fgrep -q -e '/dev/sdd ceph data, active' -e '/dev/sdd ceph data, prepared'",
)
}
it { should contain_exec("ceph-deploy osd activate test.example:/dev/sdd:/dev/journal").with(
@@ -47,7 +47,7 @@ describe 'ceph::osds::osd', :type => :define do
'tries' => 3,
'logoutput' => true,
'timeout' => 0,
- 'unless' => "ceph osd dump | grep -q \"osd.$(sed -nEe 's|/dev/sdd\\ .*ceph-([0-9]+).*$|\\1|p' /proc/mounts)\\ up\\ .*\\ in\\ \"",
+ 'onlyif' => "ceph-disk list | fgrep -q -e '/dev/sdd ceph data, prepared'",
)
}
end

View File

@ -25,6 +25,73 @@ def mock_os_path(mocker):
return res
def test_find_node_deployment_info():
roles = ['controller', 'primary-controller']
node = mock.Mock()
node.id = 1
res = env_util.find_node_deployment_info(node, roles, DEPLOYMENT_INFO)
assert res == DEPLOYMENT_INFO[0]
def test_find_node_deployment_info_none():
roles = ['controller', 'primary-controller']
node = mock.Mock()
node.id = 2
res = env_util.find_node_deployment_info(node, roles, DEPLOYMENT_INFO)
assert res is None
DEPLOYMENT_INFO = [{
'uid': '1',
'role': 'primary-controller',
'nodes': [{
'uid': '1',
'role': 'primary-controller',
'name': 'test',
}, {
'uid': '1',
'role': 'zabbix',
'name': 'test',
}, {
'uid': '2',
'role': 'compute',
'name': 'test2',
}],
}, {
'uid': '1',
'role': 'zabbix',
'nodes': [{
'uid': '1',
'role': 'primary-controller',
'name': 'test',
}, {
'uid': '1',
'role': 'zabbix',
'name': 'test',
}, {
'uid': '2',
'role': 'compute',
'name': 'test2',
}],
}, {
'uid': '2',
'role': 'compute',
'nodes': [{
'uid': '1',
'role': 'primary-controller',
'name': 'test',
}, {
'uid': '1',
'role': 'zabbix',
'name': 'test',
}, {
'uid': '2',
'role': 'compute',
'name': 'test2',
}],
}]
def test_parse_tenant_get():
res = env_util.parse_tenant_get(TENANT_GET_SAMPLE, 'id')
assert res == 'e26c8079d61f46c48f9a6d606631ee5e'

View File

@ -11,6 +11,8 @@
# under the License.
import mock
import pytest
from xml.etree import ElementTree
from octane.util import maintenance
from octane.util import subprocess
@ -21,6 +23,32 @@ def test_get_crm_services():
assert sorted(res) == CRM_XML_PARSE_RESULT
@pytest.mark.parametrize("resource_list,status,expected_result", [
(["master_p_rabbitmq-server", "vip__management_old"], False, False),
(["master_p_rabbitmq-server", "vip__management_old"], True, False),
(["master_p_rabbitmq-server", "p_ceilometer-alarm-evaluator"], False,
True),
(["clone_p_neutron-metadata-agent", "vip__management_old",
"group__zabbix-server"], True, True),
(["test1", "vip__management_old"], True, False),
(["test1", "test2"], False, True),
])
def test_resources_synced(resource_list, status, expected_result):
res = maintenance.is_resources_synced(resource_list, CRM_XML_STATUS_SAMPLE,
status)
assert res is expected_result
def test_resources_status():
data = ElementTree.fromstring(CRM_XML_STATUS_SAMPLE)
resources = next(el for el in data if el.tag == 'resources')
result = []
for resource in resources:
result.append(maintenance.is_resource_active(resource))
assert result == [True, False, False, True, True]
def test_stop_corosync_services(mocker, mock_ssh_call, mock_ssh_call_output,
mock_subprocess, node):
get_one_controller = mocker.patch('octane.util.env.get_one_controller')
@ -34,6 +62,9 @@ def test_stop_corosync_services(mocker, mock_ssh_call, mock_ssh_call_output,
mocker.patch('time.sleep')
wait_for_services = \
mocker.patch.object(maintenance, 'wait_for_corosync_services_sync')
maintenance.stop_corosync_services('env')
assert not mock_subprocess.called
@ -41,6 +72,8 @@ def test_stop_corosync_services(mocker, mock_ssh_call, mock_ssh_call_output,
mock_ssh_call_output.assert_called_once_with(['cibadmin', '--query',
'--scope', 'resources'],
node=node)
assert wait_for_services.call_args_list == \
[mock.call('env', ['s1', 's2'], 'stop')]
assert mock_ssh_call.call_args_list == [
mock.call(['crm', 'resource', 'stop', 's1'], node=node),
mock.call(['crm', 'resource', 'stop', 's1'], node=node),
@ -57,10 +90,16 @@ def test_start_corosync_services(mocker, mock_ssh_call, mock_ssh_call_output,
mock_ssh_call.side_effect = \
[None, subprocess.CalledProcessError(1, 'cmd'), None]
wait_for_services = \
mocker.patch.object(maintenance, 'wait_for_corosync_services_sync')
maintenance.start_corosync_services('env')
mock_ssh_call_output.assert_called_once_with(
['cibadmin', '--query', '--scope', 'resources'], node=node)
assert wait_for_services.call_args_list == \
[mock.call('env', ['test_service1', 'test_service2'], 'start')]
assert mock_ssh_call.call_args_list == [
mock.call(['crm', 'resource', 'start', 'test_service1'], node=node),
mock.call(['crm', 'resource', 'start', 'test_service2'], node=node),
@ -458,12 +497,61 @@ CRM_XML_SAMPLE = """
</resources>
"""[1:] # noqa
CRM_XML_PARSE_RESULT = [
'clone_p_dns',
'clone_p_haproxy',
'clone_p_heat-engine',
'clone_p_mysql',
'clone_p_neutron-dhcp-agent',
'clone_p_neutron-l3-agent',
'clone_p_neutron-metadata-agent',
'clone_p_neutron-plugin-openvswitch-agent',
'clone_p_ntp',
'clone_p_vrouter',
'group__zabbix-server',
'master_p_conntrackd',
'master_p_rabbitmq-server',
'p_ceilometer-agent-central',
'p_ceilometer-alarm-evaluator',
'vip__management',
'vip__public',
'vip__vrouter',
'vip__vrouter_pub'
]
CRM_XML_STATUS_SAMPLE = """
<crm_mon version="1.1.12">
<resources>
<resource id="vip__management_old" resource_agent="ocf::mirantis:ns_IPaddr2" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
<node name="node-2" id="node-2" cached="false"/>
</resource>
<resource id="p_ceilometer-alarm-evaluator" resource_agent="ocf::mirantis:ceilometer-alarm-evaluator" role="Started" active="false" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0" />
<clone id="master_p_rabbitmq-server" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false" >
<resource id="p_rabbitmq-server" resource_agent="ocf::mirantis:rabbitmq-server" role="Master" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
<node name="node-3" id="node-3" cached="false"/>
</resource>
<resource id="p_rabbitmq-server" resource_agent="ocf::mirantis:rabbitmq-server" role="Slave" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
<node name="node-2" id="node-2" cached="false"/>
</resource>
<resource id="p_rabbitmq-server" resource_agent="ocf::mirantis:rabbitmq-server" role="Stopped" active="false" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0" />
</clone>
<clone id="clone_p_neutron-metadata-agent" >
<resource id="p_neutron-metadata-agent" resource_agent="ocf::mirantis:neutron-agent-metadata" active="true" >
<node name="node-3" id="node-3" cached="false"/>
</resource>
<resource id="p_neutron-metadata-agent" resource_agent="ocf::mirantis:neutron-agent-metadata" active="true" >
<node name="node-2" id="node-2" cached="false"/>
</resource>
<resource id="p_neutron-metadata-agent" resource_agent="ocf::mirantis:neutron-agent-metadata" active="true" >
<node name="node-5" id="node-5" cached="false"/>
</resource>
</clone>
<group id="group__zabbix-server" number_resources="2" >
<resource id="vip__zbx_vip_mgmt" resource_agent="ocf::fuel:ns_IPaddr2" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
<node name="node-6" id="6" cached="false"/>
</resource>
<resource id="p_zabbix-server" resource_agent="ocf::fuel:zabbix-server" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
<node name="node-6" id="6" cached="false"/>
</resource>
</group>
</resources>
</crm_mon>
"""[1:] # noqa

View File

@ -12,6 +12,7 @@
import subprocess
from mock import call
from mock import Mock
from octane.helpers import network
@ -87,3 +88,225 @@ def test_create_overlay_network(mocker):
node1.id)
assert mock_ssh.call_args_list == expected_args
def test_delete_overlay_network(mocker):
node = Mock()
deployment_info = {
'network_scheme': {
'transformations': [{
'action': 'add-br',
'name': 'br-ex',
'provider': 'ovs',
}, {
'action': 'add-br',
'name': 'br-mgmt',
}]
}
}
mock_ssh = mocker.patch('octane.util.ssh.call')
mock_ovs_tuns = mocker.patch('octane.helpers.network.list_tunnels_ovs')
mock_ovs_tuns.return_value = ['br-ex--gre-10.10.10.2']
mock_lnx_tun = mocker.patch('octane.helpers.network.list_tunnels_lnx')
mock_lnx_tun.return_value = ['gre3-3']
expected_args = [
call(['ovs-vsctl', 'del-port', 'br-ex', 'br-ex--gre-10.10.10.2'],
node=node),
call(['brctl', 'delif', 'br-mgmt', 'gre3-3'], node=node),
call(['ip', 'link', 'delete', 'gre3-3'], node=node),
]
network.delete_overlay_networks(node, deployment_info)
assert mock_ssh.call_args_list == expected_args
def test_delete_patch_ports(mocker):
node = Mock()
mock_ssh = mocker.patch('octane.util.ssh.call')
expected_args = [
call(['ovs-vsctl', 'del-port', 'br-ovs-bond1', 'br-ovs-bond1--br-ex'],
node=node),
call(['ovs-vsctl', 'del-port', 'br-ex', 'br-ex--br-ovs-bond1'],
node=node),
call(['ovs-vsctl', 'del-port', 'br-ovs-bond2',
'br-ovs-bond2--br-mgmt'],
node=node),
call(['ovs-vsctl', 'del-port', 'br-mgmt', 'br-mgmt--br-ovs-bond2'],
node=node),
]
network.delete_patch_ports(node, DEPLOYMENT_INFO_5_1)
assert mock_ssh.call_args_list == expected_args
def test_delete_lnx_ports(mocker):
node = Mock()
mock_ssh = mocker.patch('octane.util.ssh.call')
expected_args = [
call(['brctl', 'delif', 'br-ex', 'eth0.130'],
node=node),
call(['brctl', 'delif', 'br-mgmt', 'eth1.220'],
node=node),
]
network.delete_patch_ports(node, DEPLOYMENT_INFO_7_0)
assert mock_ssh.call_args_list == expected_args
def test_create_patch_ports_5_1(mocker):
node = Mock()
mock_ssh = mocker.patch('octane.util.ssh.call')
expected_args = [
call(['ovs-vsctl', 'add-port', 'br-ex', 'br-ex--br-ovs-bond1',
'trunks=[0]', '--', 'set', 'interface', 'br-ex--br-ovs-bond1',
'type=patch', 'options:peer=br-ovs-bond1--br-ex'],
node=node),
call(['ovs-vsctl', 'add-port', 'br-ovs-bond1', 'br-ovs-bond1--br-ex',
'trunks=[0]', '--', 'set', 'interface', 'br-ovs-bond1--br-ex',
'type=patch', 'options:peer=br-ex--br-ovs-bond1'],
node=node),
call(['ovs-vsctl', 'add-port', 'br-mgmt', 'br-mgmt--br-ovs-bond2',
'--', 'set', 'interface', 'br-mgmt--br-ovs-bond2', 'type=patch',
'options:peer=br-ovs-bond2--br-mgmt'],
node=node),
call(['ovs-vsctl', 'add-port', 'br-ovs-bond2', 'br-ovs-bond2--br-mgmt',
'tag=102', '--', 'set', 'interface', 'br-ovs-bond2--br-mgmt',
'type=patch', 'options:peer=br-mgmt--br-ovs-bond2'],
node=node)
]
network.create_patch_ports(node, DEPLOYMENT_INFO_5_1)
assert mock_ssh.call_args_list == expected_args
def test_create_patch_ports_7_0(mocker):
node = Mock()
mock_ssh = mocker.patch('octane.util.ssh.call')
expected_args = [
call(['ovs-vsctl', 'add-port', 'br-ex', 'br-ex--br-ovs-bond1', '--',
'set', 'interface', 'br-ex--br-ovs-bond1', 'type=patch',
'options:peer=br-ovs-bond1--br-ex'],
node=node),
call(['ovs-vsctl', 'add-port', 'br-ovs-bond1', 'br-ovs-bond1--br-ex',
'--', 'set', 'interface', 'br-ovs-bond1--br-ex', 'type=patch',
'options:peer=br-ex--br-ovs-bond1'],
node=node),
call(['ovs-vsctl', 'add-port', 'br-mgmt', 'br-mgmt--br-ovs-bond2',
'--', 'set', 'interface', 'br-mgmt--br-ovs-bond2', 'type=patch',
'options:peer=br-ovs-bond2--br-mgmt'],
node=node),
call(['ovs-vsctl', 'add-port', 'br-ovs-bond2', 'br-ovs-bond2--br-mgmt',
'tag=102', '--', 'set', 'interface', 'br-ovs-bond2--br-mgmt',
'type=patch', 'options:peer=br-mgmt--br-ovs-bond2'],
node=node)
]
network.create_patch_ports(node, DEPLOYMENT_INFO_OVS_7_0)
assert mock_ssh.call_args_list == expected_args
DEPLOYMENT_INFO_5_1 = {
'openstack_version': '2014.1.3-5.1.1',
'network_scheme': {
'transformations': [{
'action': 'add-br',
'name': 'br-ex',
}, {
'action': 'add-patch',
'bridges': [
'br-ovs-bond1',
'br-ex'
],
'trunks': [
0
]
}, {
'action': 'add-patch',
'bridges': [
'br-ovs-bond2',
'br-mgmt'
],
'tags': [
102,
0
]
}, {
'action': 'add-br',
'name': 'br-mgmt',
}]
}
}
DEPLOYMENT_INFO_OVS_7_0 = {
'openstack_version': '2015.1.0-7.0',
'network_scheme': {
'transformations': [{
'action': 'add-br',
'name': 'br-ex',
'provider': 'ovs',
}, {
'action': 'add-patch',
'bridges': [
'br-ovs-bond1',
'br-ex'
],
'vlan_ids': [
0,
0
]
}, {
'action': 'add-patch',
'bridges': [
'br-ovs-bond2',
'br-mgmt'
],
'vlan_ids': [
102,
0
]
}, {
'action': 'add-br',
'name': 'br-mgmt',
'provider': 'ovs'
}]
}
}
DEPLOYMENT_INFO_7_0 = {
'openstack_version': '2015.1.0-7.0',
'network_scheme': {
'transformations': [{
'action': 'add-br',
'name': 'br-ex',
}, {
'action': 'add-port',
'name': 'eth0.130',
'bridge': 'br-ex'
}, {
'action': 'add-br',
'name': 'br-mgmt',
}, {
'action': 'add-port',
'name': 'eth1.220',
'bridge': 'br-mgmt'
}]
}
}

View File

@ -0,0 +1,77 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octane.helpers import transformations as ts
def test_reset_gw_admin(mocker):
host_config = DEPLOYMENT_INFO
gateway = '10.10.10.10'
res = ts.reset_gw_admin(host_config, gateway)
assert res['network_scheme']['endpoints']['br-fw-admin']['gateway'] == \
gateway
def test_get_network_gw(mocker):
net_name = 'test_net'
gateway = '10.10.10.10'
data = {
'networks': [
{
'name': net_name,
'gateway': gateway
}
]
}
res = ts.get_network_gw(data, net_name)
assert res == gateway
def test_get_network_gw_no_gw(mocker):
net_name = 'test_net'
data = {
'networks': [{
'name': net_name,
}]
}
res = ts.get_network_gw(data, net_name)
assert res is None
def test_get_network_gw_no_net(mocker):
net_name = 'test_net'
data = {
'networks': [{
'name': 'another_test_net',
'gateway': '10.10.10.10'
}]
}
res = ts.get_network_gw(data, net_name)
assert res is None
DEPLOYMENT_INFO = {
'network_scheme': {
'endpoints': {
'br-ex': {'gateway': '172.16.0.1', },
'br-fw-admin': {}
}
}
}

View File

@@ -0,0 +1,19 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def test_parser(mocker, octane_app):
m = mocker.patch('octane.commands.upgrade_ceph.upgrade_ceph')
octane_app.run(["upgrade-ceph", "1", "2"])
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with(1, 2)

View File

@@ -0,0 +1,98 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import mock
import pytest
from octane.util import node as node_util
from octane.util import ssh
NODES = [
{'fqdn': 'node-1',
'network_data': [{'name': 'management', 'ip': '10.20.0.2'},
{'name': 'public', 'ip': '172.167.0.2'}]},
{'fqdn': 'node-2',
'network_data': [{'name': 'management', 'ip': '10.20.0.3'},
{'name': 'public', 'ip': '172.167.0.3'}]},
{'fqdn': 'node-3',
'network_data': [{'name': 'management', 'ip': '10.20.0.4'},
{'name': 'public', 'ip': '172.167.0.4'}]},
]
@pytest.mark.parametrize('node_data,network_name,expected_ip', [
(NODES[0], 'management', '10.20.0.2'),
(NODES[0], 'storage', None),
({'network_data': []}, 'management', None),
])
def test_get_ip(node_data, network_name, expected_ip):
node = create_node(node_data)
ip = node_util.get_ip(network_name, node)
assert ip == expected_ip
def create_node(data):
return mock.Mock(data=data, spec_set=['data'])
@pytest.fixture
def nodes():
return map(create_node, NODES)
@pytest.mark.parametrize("network_name,expected_ips", [
('management', ['10.20.0.2', '10.20.0.3', '10.20.0.4']),
('public', ['172.167.0.2', '172.167.0.3', '172.167.0.4']),
])
def test_get_ips(nodes, network_name, expected_ips):
ips = node_util.get_ips(network_name, nodes)
assert ips == expected_ips
def test_get_hostnames(nodes):
hostnames = node_util.get_hostnames(nodes)
assert hostnames == ['node-1', 'node-2', 'node-3']
def test_tar_files(node, mock_ssh_popen, mock_open):
content = b'fake data\nin\nthe\narchive'
proc = mock_ssh_popen.return_value.__enter__.return_value
proc.stdout = io.BytesIO(content)
buf = io.BytesIO()
mock_open.return_value.write.side_effect = buf.write
node_util.tar_files('filename', node, 'a.file', 'b.file')
mock_ssh_popen.assert_called_once_with(
['tar', '-czvP', 'a.file', 'b.file'],
stdout=ssh.PIPE, node=node)
mock_open.assert_called_once_with('filename', 'wb')
assert buf.getvalue() == content
def test_untar_files(node, mock_ssh_popen, mock_open):
content = b'fake data\nin\nthe\narchive'
proc = mock_ssh_popen.return_value.__enter__.return_value
buf = io.BytesIO()
proc.stdin.write = buf.write
mock_open.return_value = io.BytesIO(content)
node_util.untar_files('filename', node)
mock_ssh_popen.assert_called_once_with(['tar', '-xzv', '-C', '/'],
stdin=ssh.PIPE, node=node)
mock_open.assert_called_once_with('filename', 'rb')
assert buf.getvalue() == content

View File

@@ -22,6 +22,8 @@ from fuelclient.objects import environment as environment_obj
from fuelclient.objects import node as node_obj
from fuelclient.objects import task as task_obj
from octane.helpers import tasks as tasks_helpers
from octane.helpers import transformations
from octane import magic_consts
from octane.util import ssh
from octane.util import subprocess
@@ -225,37 +227,38 @@ def move_nodes(env, nodes):
node_id = node.data['id']
subprocess.call(
["fuel2", "env", "move", "node", str(node_id), str(env_id)])
LOG.info("Nodes provision started. Please wait...")
wait_for_nodes(nodes, "provisioned")
def provision_nodes(env, nodes):
env.install_selected_nodes('provision', nodes)
wait_for_nodes(nodes, "provisioned")
LOG.info("Nodes provision started. Please wait...")
wait_for_nodes(nodes, "provisioned", timeout=180 * 60)
def deploy_nodes(env, nodes):
env.install_selected_nodes('deploy', nodes)
wait_for_nodes(nodes, "ready")
LOG.info("Nodes deploy started. Please wait...")
wait_for_nodes(nodes, "ready", timeout=180 * 60)
wait_for_tasks(env, "running")
def deploy_changes(env, nodes):
env.deploy_changes()
LOG.info("Nodes deploy started. Please wait...")
wait_for_env(env, "operational", timeout=180 * 60)
def merge_deployment_info(env):
default_info = env.get_default_facts('deployment')
def get_deployment_info(env):
deployment_info = []
try:
deployment_info = env.get_facts('deployment')
except fuelclient.cli.error.ServerDataException:
LOG.warn('Deployment info is unchanged for env: %s',
env.id)
deployment_info = []
for info in default_info:
if not (info['uid'], info['role']) in [(i['uid'], i['role'])
for i in deployment_info]:
deployment_info.append(info)
deployment_info = [x for x in deployment_info
if x['role'] != 'primary-controller']
return deployment_info
@@ -275,3 +278,89 @@ def set_network_template(env, filename):
with open(filename, 'r') as f:
data = f.read()
env.set_network_template_data(yaml.load(data))
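# Rebuild deployment facts for the environment: when running isolated, back up
# the default facts to the Fuel cache and strip port transformations; always
# reset the admin gateway, disable the ping checker and drop the predefined
# networks; then upload the facts and the deployment tasks with skips applied.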
def update_deployment_info(env, isolated):
default_info = env.get_default_facts('deployment')
network_data = env.get_network_data()
gw_admin = transformations.get_network_gw(network_data,
"fuelweb_admin")
if isolated:
# From backup_deployment_info
backup_path = os.path.join(
magic_consts.FUEL_CACHE,
"deployment_{0}.orig".format(env.id),
)
if not os.path.exists(backup_path):
os.makedirs(backup_path)
# Roughly taken from Environment.write_facts_to_dir
for info in default_info:
fname = os.path.join(
backup_path,
"{0}_{1}.yaml".format(info['role'], info['uid']),
)
with open(fname, 'w') as f:
yaml.safe_dump(info, f, default_flow_style=False)
deployment_info = []
for info in default_info:
if isolated:
transformations.remove_ports(info)
transformations.reset_gw_admin(info, gw_admin)
# From run_ping_checker
info['run_ping_checker'] = False
transformations.remove_predefined_nets(info)
deployment_info.append(info)
env.upload_facts('deployment', deployment_info)
tasks = env.get_deployment_tasks()
tasks_helpers.skip_tasks(tasks)
env.update_deployment_tasks(tasks)
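# Return the facts entry for the given node if it carries one of the requested
# roles, otherwise None.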
def find_node_deployment_info(node, roles, data):
node_roles = [n['role']
for n in data[0]['nodes'] if str(node.id) == n['uid']]
if not set(roles) & set(node_roles):
return None
for info in data:
if info['uid'] == str(node.id):
return info
return None
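# Load deployment facts previously backed up to the Fuel cache for this
# environment; returns None if no backup directory exists.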
def get_backup_deployment_info(env_id):
deployment_info = []
backup_path = os.path.join(
magic_consts.FUEL_CACHE, 'deployment_{0}.orig'.format(env_id))
if not os.path.exists(backup_path):
return None
for filename in os.listdir(backup_path):
filepath = os.path.join(backup_path, filename)
with open(filepath) as info_file:
info = yaml.safe_load(info_file)
deployment_info.append(info)
return deployment_info
def collect_deployment_info(env, nodes):
deployment_info = []
for node in nodes:
info = get_astute_yaml(env, node)
deployment_info.append(info)
return deployment_info
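# Yield (node, facts) pairs for every controller, preferring backed up facts
# and falling back to the astute.yaml collected from the nodes themselves.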
def iter_deployment_info(env, roles):
controllers = list(get_controllers(env))
full_info = get_backup_deployment_info(env.id)
roles = ['primary-controller', 'controller']
if not full_info:
full_info = collect_deployment_info(env, controllers)
for node in controllers:
info = find_node_deployment_info(node, roles, full_info)
yield (node, info)

View File

@@ -51,34 +51,121 @@ def disable_apis(env):
new.write(use_backend_line)
ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
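# Undo the haproxy maintenance tweaks made by disable_apis and restart
# p_haproxy on every controller.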
def enable_apis(env):
controllers = list(env_util.get_controllers(env))
maintenance_line = 'backend maintenance'
use_backend_line = ' use_backend maintenance if TRUE'
for node in controllers:
sftp = ssh.sftp(node)
sftp.chdir('/etc/haproxy')
with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
for line in old:
if maintenance_line in line:
continue
new.write(line)
sftp.chdir('/etc/haproxy/conf.d')
for f in sftp.listdir():
with ssh.update_file(sftp, f) as (old, new):
for line in old:
if use_backend_line in line:
continue
new.write(line)
ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
_default_exclude_services = ('p_mysql', 'p_haproxy', 'p_dns', 'p_ntp', 'vip',
'p_conntrackd', 'p_rabbitmq-server',
'clone_p_vrouter')
def get_crm_services(status_out, exclude=_default_exclude_services):
def get_crm_services(status_out):
data = ElementTree.fromstring(status_out)
for resource in data:
name = resource.get('id')
if any(service in name for service in exclude):
continue
yield name
yield resource.get('id')
def start_corosync_services(env):
manage_corosync_services(env, 'start')
def stop_corosync_services(env):
manage_corosync_services(env, 'stop')
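# Start or stop every Pacemaker resource except the infrastructure ones listed
# in _default_exclude_services, retrying while pacemaker is busy, then wait for
# the cluster to report the requested state.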
def manage_corosync_services(env, status):
node = env_util.get_one_controller(env)
status_out = ssh.call_output(['cibadmin', '--query', '--scope',
'resources'], node=node)
for service in get_crm_services(status_out):
services_list = []
for res in get_crm_services(status_out):
if any(service in res for service in _default_exclude_services):
continue
services_list.append(res)
for service in services_list:
while True:
try:
ssh.call(['crm', 'resource', 'stop', service],
ssh.call(['crm', 'resource', status, service],
node=node)
except subprocess.CalledProcessError:
pass
# Sometimes pacemaker rejects requests that it is not able to
# process yet. The sleep was added to mitigate this risk.
time.sleep(1)
else:
break
time.sleep(60)
wait_for_corosync_services_sync(env, services_list, status)
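# Poll `crm_mon --as-xml` until every resource in resource_list reaches the
# requested state, raising if the cluster does not converge within `timeout`
# seconds.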
def wait_for_corosync_services_sync(env, resource_list, status,
timeout=1200, check_freq=20):
status_bool = status == 'start'
node = env_util.get_one_controller(env)
started_at = time.time()
while True:
crm_out = ssh.call_output(['crm_mon', '--as-xml'], node=node)
if is_resources_synced(resource_list, crm_out, status_bool):
return
if time.time() - started_at >= timeout:
raise Exception("Timeout waiting for corosync cluster for env %s"
" to be synced" % env.id)
time.sleep(check_freq)
def is_resources_synced(resources, crm_out, status):
def get_resource(resources, resource_id):
for resource in resources:
if resource.get('id') == resource_id:
return resource
return None
data = ElementTree.fromstring(crm_out)
mon_resources = data.find('resources')
for resource in resources:
res = get_resource(mon_resources, resource)
if not (is_resource_active(res) is status):
return False
return True
# Resources are fetched from the output of the 'crm_mon' command. This command
# doesn't report a resource that is not started, so an 'absent' resource can be
# considered disabled.
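# An illustrative sketch (not captured from a real cluster) of the crm_mon XML
# this parser walks: a flat <resources> element whose children are either bare
# <resource> primitives or group/clone containers of primitives, each primitive
# carrying an 'active' attribute:
#   <resources>
#     <resource id="vip__public" active="true" ... />
#     <clone id="clone_p_vrouter">
#       <resource id="p_vrouter" active="false" ... />
#     </clone>
#   </resources>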
def is_resource_active(resource):
if resource is None:
return False
if resource.tag == 'resource':
return is_primitive_active(resource)
for primitive in resource:
if not is_primitive_active(primitive):
return False
return True
def is_primitive_active(resource):
if resource.get('active') == 'true':
return True
return False
def stop_upstart_services(env):
@@ -106,21 +193,6 @@ def stop_upstart_services(env):
ssh.call(['stop', service], node=node)
def start_corosync_services(env):
node = next(env_util.get_controllers(env))
status_out = ssh.call_output(['cibadmin', '--query', '--scope',
'resources'], node=node)
for service in get_crm_services(status_out):
while True:
try:
ssh.call(['crm', 'resource', 'start', service],
node=node)
except subprocess.CalledProcessError:
pass
else:
break
def start_upstart_services(env):
controllers = list(env_util.get_controllers(env))
for node in controllers:
@@ -134,3 +206,36 @@ def start_upstart_services(env):
to_start = svc_file.read().splitlines()
for service in to_start:
ssh.call(['start', service], node=node)
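# Forcefully stop the Pacemaker cluster on every controller; start_cluster
# below brings it back (corosync directly on pre-6.0 releases, `pcs cluster
# start` afterwards) and waits for the excluded core resources to come up.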
def stop_cluster(env):
cmds = [['pcs', 'cluster', 'kill']]
controllers = list(env_util.get_controllers(env))
for node in controllers:
for cmd in cmds:
ssh.call(cmd, node=node)
def start_cluster(env):
major_version = env.data['fuel_version'].split('.')[0]
cmds = []
if int(major_version) < 6:
cmds = [['service', 'corosync', 'start']]
else:
cmds = [['pcs', 'cluster', 'start']]
controllers = list(env_util.get_controllers(env))
for node in controllers:
for cmd in cmds:
ssh.call(cmd, node=node)
# When we start the cluster we should wait until the resources listed in the
# `_default_exclude_services` constant are up and running. Note that the
# stop/start corosync services methods do not touch these resources at all.
node = env_util.get_one_controller(env)
status_out = ssh.call_output(['cibadmin', '--query', '--scope',
'resources'], node=node)
services_list = []
for res in get_crm_services(status_out):
if any(service in res for service in _default_exclude_services):
services_list.append(res)
wait_for_corosync_services_sync(env, services_list, 'start')

View File

@@ -10,7 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import shutil
import socket
import sys
import time
@@ -33,6 +35,41 @@ def preserve_partition(node, partition):
node.upload_node_attribute('disks', disks)
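# Return the IP address assigned to the node on the named network, or None if
# the node has no interface on that network.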
def get_ip(network_name, node):
for net in node.data['network_data']:
if net['name'] == network_name:
return net['ip']
def get_ips(network_name, nodes):
get_network_ip = functools.partial(get_ip, network_name)
return map(get_network_ip, nodes)
def get_hostnames(nodes):
return [node.data['fqdn'] for node in nodes]
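# Stream a gzipped tar of the given remote files from the node over ssh into a
# local archive.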
def tar_files(filename, node, *files):
cmd = ['tar', '-czvP']
cmd.extend(files)
with ssh.popen(cmd, stdout=ssh.PIPE, node=node) as proc:
with open(filename, 'wb') as f:
shutil.copyfileobj(proc.stdout, f)
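# Push a local archive to the node over ssh and unpack it relative to the
# filesystem root.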
def untar_files(filename, node):
cmd = ['tar', '-xzv', '-C', '/']
with ssh.popen(cmd, stdin=ssh.PIPE, node=node) as proc:
with open(filename, 'rb') as f:
shutil.copyfileobj(f, proc.stdin)
def get_hostname_remotely(node):
hostname = ssh.call_output(['hostname'], node=node)
return hostname[:-1]
def reboot_nodes(nodes, timeout=600):
old_clients = dict((node, ssh.get_client(node)) for node in nodes)
for node in nodes:

View File

@@ -17,5 +17,3 @@ python-keystoneclient<=0.11.1 # the last version without too fresh requirements
python-fuelclient>=6.1
cliff>=1.7.0,<=1.9.0 # should already be pulled by python-fuelclient
paramiko==1.13.0
pyzabbix==0.7.3

View File

@@ -24,6 +24,12 @@ classifier =
packages =
octane
# NOTE(akscram): The extras section is properly handled in pbr>=1.0.0 and
# represents the extras_require parameter.
# [extras]
# zabbix =
# pyzabbix==0.7.3
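# When enabled, the extra can be pulled in with the standard setuptools extras
# syntax, e.g. `pip install .[zabbix]` from a source checkout.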
[entry_points]
console_scripts =
octane = octane.app:main
@@ -33,13 +39,15 @@ octane =
upgrade-env = octane.commands.upgrade_env:UpgradeEnvCommand
upgrade-node = octane.commands.upgrade_node:UpgradeNodeCommand
upgrade-db = octane.commands.upgrade_db:UpgradeDBCommand
upgrade-ceph = octane.commands.upgrade_ceph:UpgradeCephCommand
install-node = octane.commands.install_node:InstallNodeCommand
upgrade-control = octane.commands.upgrade_controlplane:UpgradeControlPlaneCommand
rollback-control = octane.commands.rollback_controlplane:RollbackControlPlaneCommand
sync-networks = octane.commands.sync_networks:SyncNetworksCommand
cleanup = octane.commands.cleanup:CleanupCommand
sync-images = octane.commands.sync_images:SyncImagesCommand
sync-images-prepare = octane.commands.sync_images:SyncImagesPrepareCommand
update-plugin-settings = octane.commands.update_plugin_settings:UpdatePluginSettingsCommand
update-plugin-settings = octane.commands.update_plugin_settings:UpdatePluginSettingsCommand [zabbix]
octane.handlers.upgrade =
controller = octane.handlers.upgrade.controller:ControllerUpgrade
compute = octane.handlers.upgrade.compute:ComputeUpgrade

View File

@@ -23,4 +23,7 @@ except ImportError:
setuptools.setup(
setup_requires=['pbr'],
# NOTE(akscram): The extras_require parameter is properly handled from
# setup.cfg in pbr since 1.0.0.
extras_require={'zabbix': ['pyzabbix==0.7.3']},
pbr=True)

View File

@@ -1,5 +1,5 @@
%define name fuel-octane
%{!?version: %define version 1}
%{!?version: %define version 1.0.0}
%{!?release: %define release 1}
Name: %{name}

View File

@@ -2,3 +2,4 @@ hacking<0.11,>=0.10.0
pytest<2.8.0
pytest-cov
pytest-mock
pyzabbix==0.7.3