Add scripts for jenkins tests

Łukasz Oleś 2016-05-06 09:24:14 +02:00
parent fa9f102443
commit aac49336f8
11 changed files with 385 additions and 9 deletions

.gitignore vendored

@@ -6,3 +6,6 @@ deploy/solar
 solar-k8s-modules
 tmp
 solar-resources
+
+# vim files
+*.swp


@@ -1,2 +1,3 @@
 sudo dnf install -y python python-dnf ansible libselinux-python
-sudo /usr/sbin/setenforce 0
+sudo /usr/sbin/setenforce 0 || echo 'ok'
+sudo systemctl stop firewalld.service || echo 'ok'
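The `|| echo 'ok'` guard makes each hardening step best-effort: when SELinux is already disabled or firewalld is not installed, the nonzero exit is swallowed instead of failing the whole provisioning run, which matters under `set -e` or a CI step that treats any nonzero exit as failure. A minimal sketch of the pattern, with a placeholder command:

```bash
#!/bin/bash
set -e
# 'flaky_step' is a stand-in for the setenforce/systemctl calls above; the
# '|| echo' keeps the compound command's exit status at zero, so set -e
# does not abort the script
flaky_step || echo 'ok'
echo "provisioning continues"
```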


@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 import argparse
+import os
 import re
 from netaddr import IPAddress
@@ -14,6 +15,8 @@ from solar.events.controls import Dep
 DEFAULT_MASTER_NODE_RESOURCE_NAME = 'kube-node-master'
 MASTER_NODE_RESOURCE_NAME = None
+CONFIG_NAME = 'config.yaml'
+DEFAULT_CONFIG_NAME = 'config.yaml.sample'
 def create_config(dns_config):
@@ -87,13 +90,13 @@ def setup_nodes(config, user_config, num=1, existing_nodes=None):
     if existing_nodes:
         kube_nodes = [
-            setup_slave_node(config, user_config[i], kubernetes_master,
-                             calico_master, internal_network, i, node)
+            setup_slave_node(config, kubernetes_master,
+                             calico_master, internal_network, i, None, node)
             for (i, node) in enumerate(existing_nodes)]
     else:
         kube_nodes = [
-            setup_slave_node(config, user_config[i], kubernetes_master,
-                             calico_master, internal_network, i)
+            setup_slave_node(config, kubernetes_master,
+                             calico_master, internal_network, i, user_config[i])
             for i in xrange(num)]
     kube_master = rs.load(MASTER_NODE_RESOURCE_NAME)
@@ -107,8 +110,8 @@ def setup_nodes(config, user_config, num=1, existing_nodes=None):
     })
-def setup_slave_node(config, user_config, kubernetes_master, calico_master,
-                     internal_network, i, existing_node=None):
+def setup_slave_node(config, kubernetes_master, calico_master,
+                     internal_network, i, user_config=None, existing_node=None):
     j = i + 1
     if existing_node:
         kube_node = existing_node
@@ -276,8 +279,18 @@ def get_args(user_config):
 def get_user_config():
-    with open('config.yaml') as conf:
-        config = yaml.load(conf)
+    global CONFIG_NAME
+    global DEFAULT_CONFIG_NAME
+    if os.path.exists(CONFIG_NAME):
+        with open(CONFIG_NAME) as conf:
+            config = yaml.load(conf)
+    elif os.path.exists(DEFAULT_CONFIG_NAME):
+        with open(DEFAULT_CONFIG_NAME) as conf:
+            config = yaml.load(conf)
+    else:
+        raise Exception('{0} and {1} configuration files not found'.format(
+            CONFIG_NAME, DEFAULT_CONFIG_NAME))
     for slave in config['kube_slaves']['slaves']:
         for key, value in config['kube_slaves']['default'].iteritems():
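With this fallback the script now runs out of the box against the bundled sample config; to customize a deployment, copy the sample first (assuming this is the `setup_k8s.py` that the Jenkins wrappers invoke):

```bash
cp config.yaml.sample config.yaml
# edit config.yaml (slave definitions live under kube_slaves), then:
python setup_k8s.py deploy
```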

utils/jenkins/README.md Normal file

@@ -0,0 +1,16 @@
Testing the examples
====================

To automatically test the examples, first install the fuel-devops framework. The installation process is described at https://github.com/openstack/fuel-devops. After installation, run the migration scripts:
```bash
export DJANGO_SETTINGS_MODULE=devops.settings
django-admin.py syncdb
django-admin.py migrate
```
To test the examples, run one of the available test scripts. You need to run it from the solar main directory, for example:
```bash
./utils/jenkins/run_hosts_example.sh
```


@@ -0,0 +1,92 @@
---
rack-01-node-params:
  vcpu: 2
  memory: 1024
  boot:
    - hd
  volumes:
    - name: base
      source_image:
      format: qcow2
  interfaces:
    - label: eth0
      l2_network_device: public
    - label: eth1
      l2_network_device: private
    - label: eth2
      l2_network_device: storage
    - label: eth3
      l2_network_device: management
  network_config:
    eth0:
      networks:
        - public
    eth1:
      networks:
        - private
    eth2:
      networks:
        - storage
    eth3:
      networks:
        - management

env_name:
address_pools:
  # Network pools used by the environment
  public-pool01:
    net: 10.10.0.0/16:24
    params:
      tag: 0
  private-pool01:
    net: 10.11.0.0/16:24
    params:
      tag: 101
  storage-pool01:
    net: 10.12.0.0/16:24
    params:
      tag: 102
  management-pool01:
    net: 10.13.0.0/16:24
    params:
      tag: 103

groups:
  - name: rack-01
    driver:
      name: devops.driver.libvirt.libvirt_driver
      params:
        connection_string: qemu:///system
        storage_pool_name: default
        stp: True
        hpet: False
        use_host_cpu: true
    network_pools:  # Address pools for OpenStack networks.
                    # Actual names should be used for keys
                    # (the same as in Nailgun, for example)
      public: public-pool01
      private: private-pool01
      storage: storage-pool01
      management: management-pool01
    l2_network_devices:  # Libvirt bridges. It is *NOT* Nailgun networks
      public:
        address_pool: public-pool01
        dhcp: "true"
        forward:
          mode: nat
      private:
        address_pool: private-pool01
      storage:
        address_pool: storage-pool01
      management:
        address_pool: management-pool01
    nodes:
      - name: solar
        role: master
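`env.py` (below) stamps this template out per node: it appends `slave-N` entries to `groups[0].nodes`, deep-copies `rack-01-node-params` into each node, and points the `base` volume's empty `source_image` at a fresh copy-on-write overlay. The overlay step is roughly equivalent to (paths illustrative):

```bash
# one throwaway qcow2 overlay per VM, backed by the shared base image
qemu-img create -b /abs/path/to/ubuntu1404 -f qcow2 tmp/solar-example_slave-0.qcow2
```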


@@ -0,0 +1,9 @@
#!/bin/bash
set -xe

export DONT_DESTROY_ON_SUCCESS=1
export SLAVES_COUNT=3
export DEPLOY_TIMEOUT=1200
export TEST_SCRIPT="/usr/bin/python setup_k8s.py deploy"

./utils/jenkins/run.sh

utils/jenkins/env.py Normal file

@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
from copy import deepcopy
import os
import subprocess as sub
import sys

import yaml

from devops.models import Environment


def create_overlay_image(env_name, node_name, base_image):
    # one copy-on-write qcow2 overlay per VM, backed by the shared base image
    overlay_image_path = 'tmp/{}_{}.qcow2'.format(env_name, node_name)
    base_image = os.path.abspath(base_image)
    if os.path.exists(overlay_image_path):
        os.unlink(overlay_image_path)
    try:
        # check_output (unlike plain call) raises CalledProcessError on a
        # nonzero exit and captures the tool's output for the error message
        sub.check_output(['qemu-img', 'create', '-b', base_image, '-f', 'qcow2',
                          overlay_image_path], stderr=sub.STDOUT)
    except sub.CalledProcessError as e:
        print e.output
        raise
    return overlay_image_path


def create_config():
    env = os.environ
    conf_path = env['CONF_PATH']
    with open(conf_path) as c:
        conf = yaml.load(c.read())

    env_name = env['ENV_NAME']
    image_path = env['IMAGE_PATH']
    master_image_path = env.get('MASTER_IMAGE_PATH', None)
    if master_image_path is None:
        master_image_path = image_path
    slaves_count = int(env['SLAVES_COUNT'])

    conf['env_name'] = env_name
    node_params = conf['rack-01-node-params']
    group = conf['groups'][0]
    # the template ships only the master node; append the requested slaves
    for i in range(slaves_count):
        group['nodes'].append({'name': 'slave-{}'.format(i),
                               'role': 'slave'})
    for node in group['nodes']:
        node['params'] = deepcopy(node_params)
        if node['role'] == 'master':
            path = master_image_path
        else:
            path = image_path
        vol_path = create_overlay_image(env_name, node['name'], path)
        node['params']['volumes'][0]['source_image'] = vol_path
    return {'template': {'devops_settings': conf}}


def get_env():
    env = os.environ
    env_name = env['ENV_NAME']
    return Environment.get(name=env_name)


def get_master_ip(env):
    admin = env.get_node(role='master')
    return admin.get_ip_address_by_network_name('public')


def get_slave_ips(env):
    slaves = env.get_nodes(role='slave')
    ips = []
    for slave in slaves:
        ips.append(slave.get_ip_address_by_network_name('public'))
    return ips


def define_from_config(conf):
    env = Environment.create_environment(conf)
    env.define()
    env.start()


if __name__ == '__main__':
    if len(sys.argv) != 2:
        sys.exit(2)
    cmd = sys.argv[1]
    if cmd == 'create_env':
        config = create_config()
        define_from_config(config)
    elif cmd == 'get_admin_ip':
        print get_master_ip(get_env())
    elif cmd == 'get_slaves_ips':
        print get_slave_ips(get_env())
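`run.sh` drives this module as a tiny CLI; its three entry points are invoked like this (the same calls `run.sh` makes):

```bash
ENV_NAME=solar-example SLAVES_COUNT=3 \
  IMAGE_PATH=bootstrap/output-qemu/ubuntu1404 \
  CONF_PATH=utils/jenkins/default.yaml \
  python utils/jenkins/env.py create_env                            # define and boot the VMs
ENV_NAME=solar-example python utils/jenkins/env.py get_admin_ip     # master's public IP
ENV_NAME=solar-example python utils/jenkins/env.py get_slaves_ips   # slave IPs as a list
```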


@@ -0,0 +1,4 @@
#!/bin/bash
set -xe

dos.py erase ${ENV_NAME}


@@ -0,0 +1,13 @@
id: simple_riak_with_transports
resources:
#% for i in range(count|int) %#
#% set j = i + 1 %#
  - id: node#{j}#
    from: k8s/node
    input:
      name: node#{j}#
      ssh_user: 'vagrant'
      ssh_key: ''
      ssh_password: 'vagrant'
      ip: '#{ips[i]}#'
#% endfor %#
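The file is a solar template written with alternate Jinja-style delimiters (`#% ... %#` for statements, `#{ ... }#` for expressions), which keeps the unrendered file parseable as YAML. `run.sh` registers it in the `templates` repository and then instantiates it with the slave IPs and count:

```bash
# from run.sh, executed on the master node:
solar repo update templates ${INSTALL_DIR}/utils/jenkins/repository
solar resource create nodes templates/nodes ips="${SLAVE_IPS}" count="${SLAVES_COUNT}"
```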

utils/jenkins/run.sh Executable file

@@ -0,0 +1,129 @@
#!/bin/bash
set -xe

# for now we assume that master ip is 10.0.0.2 and slaves ips are 10.0.0.{3,4,5,...}
ADMIN_PASSWORD=vagrant
ADMIN_USER=vagrant
INSTALL_DIR=/home/vagrant/solar-k8s

ENV_NAME=${ENV_NAME:-solar-example}
SLAVES_COUNT=${SLAVES_COUNT:-0}
CONF_PATH=${CONF_PATH:-utils/jenkins/default.yaml}
IMAGE_PATH=${IMAGE_PATH:-bootstrap/output-qemu/ubuntu1404}
TEST_SCRIPT=${TEST_SCRIPT:-/vagrant/examples/hosts_file/hosts.py}
DEPLOY_TIMEOUT=${DEPLOY_TIMEOUT:-60}
SOLAR_DB_BACKEND=${SOLAR_DB_BACKEND:-riak}

SSH_OPTIONS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"

dos.py erase ${ENV_NAME} || true
mkdir -p tmp
mkdir -p logs
rm -rf logs/*

ENV_NAME=${ENV_NAME} SLAVES_COUNT=${SLAVES_COUNT} IMAGE_PATH=${IMAGE_PATH} CONF_PATH=${CONF_PATH} python utils/jenkins/env.py create_env

SLAVE_IPS=`ENV_NAME=${ENV_NAME} python utils/jenkins/env.py get_slaves_ips`
ADMIN_IP=`ENV_NAME=${ENV_NAME} python utils/jenkins/env.py get_admin_ip`

# Wait for all servers (grep only IP addresses):
for IP in `echo ${ADMIN_IP} ${SLAVE_IPS} | grep -oE '((1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])\.){3}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])'`
do
  elapsed_time=0
  master_wait_time=30
  while true
  do
    report=$(sshpass -p ${ADMIN_PASSWORD} ssh ${SSH_OPTIONS} ${ADMIN_USER}@${IP} echo ok || echo not ready)
    if [ "${report}" = "ok" ]; then
      break
    fi
    if [ "${elapsed_time}" -gt "${master_wait_time}" ]; then
      exit 2
    fi
    sleep 1
    let elapsed_time+=1
  done
done

sshpass -p ${ADMIN_PASSWORD} rsync -rz . -e "ssh ${SSH_OPTIONS}" ${ADMIN_USER}@${ADMIN_IP}:/home/vagrant/solar-k8s --exclude tmp --exclude x-venv --exclude .vagrant --exclude .eggs --exclude '*.box' --exclude images

# tolerate a failing deploy so we can still collect logs and snapshot the env
set +e

sshpass -p ${ADMIN_PASSWORD} ssh ${SSH_OPTIONS} ${ADMIN_USER}@${ADMIN_IP} bash -s <<EOF
set -x
set -e
export PYTHONWARNINGS="ignore"

wget https://github.com/openstack/solar-resources/archive/master.zip
unzip master.zip
solar repo import solar-resources-master/resources
solar repo import solar-resources-master/templates
solar repo import -n k8s solar-k8s/resources
solar repo update templates ${INSTALL_DIR}/utils/jenkins/repository
solar resource create nodes templates/nodes ips="${SLAVE_IPS}" count="${SLAVES_COUNT}"

sudo pip install -r ${INSTALL_DIR}/requirements.txt

pushd ${INSTALL_DIR}
bash -c "${TEST_SCRIPT}"
popd

solar changes stage
solar changes process
solar orch run-once

elapsed_time=0
while true
do
  report=\$(solar o report)

  errors=\$(echo "\${report}" | grep -e ERROR | wc -l)
  if [ "\${errors}" != "0" ]; then
    solar orch report
    echo FAILURE
    exit 1
  fi

  running=\$(echo "\${report}" | grep -e PENDING -e INPROGRESS | wc -l)
  if [ "\${running}" == "0" ]; then
    solar orch report
    echo SUCCESS
    exit 0
  fi

  if [ "\${elapsed_time}" -gt "${DEPLOY_TIMEOUT}" ]; then
    solar orch report
    echo TIMEOUT
    exit 2
  fi

  sleep 5
  let elapsed_time+=5
done
EOF
deploy_res=$?

# collect logs
sshpass -p ${ADMIN_PASSWORD} scp ${SSH_OPTIONS} ${ADMIN_USER}@${ADMIN_IP}:/home/vagrant/solar.log logs/

# on success, tear the environment down unless the caller asked to keep it;
# on failure, snapshot and power it off for later inspection
if [ "${deploy_res}" -eq "0" -a "${DONT_DESTROY_ON_SUCCESS}" != "1" ]; then
  dos.py erase ${ENV_NAME}
else
  if [ "${deploy_res}" -ne "0" ]; then
    dos.py snapshot ${ENV_NAME} ${ENV_NAME}.snapshot
    dos.py destroy ${ENV_NAME}
    echo "To revert snapshot please run: dos.py revert ${ENV_NAME} ${ENV_NAME}.snapshot"
  fi
fi

exit ${deploy_res}


@@ -0,0 +1,8 @@
#!/bin/bash
set -xe

export SLAVES_COUNT=3
export DEPLOY_TIMEOUT=1200
export TEST_SCRIPT="/usr/bin/python setup_k8s.py deploy"

./utils/jenkins/run.sh