Split repos

Dmitry Pyzhov 2013-08-30 20:21:14 +04:00
parent 8634dd38e9
commit 6a986e7779
146 changed files with 46 additions and 8370 deletions

.gitmodules

@@ -1,6 +0,0 @@
[submodule "fuel"]
path = fuel
url = https://github.com/Mirantis/fuel.git
[submodule "astute"]
path = astute
url = https://github.com/Mirantis/astute.git

Makefile

@@ -1,79 +0,0 @@
.PHONY: all clean test help deep_clean
help:
@echo 'Build directives (can be overridden by environment variables'
@echo 'or by command line parameters):'
@echo ' SOURCE_DIR: $(SOURCE_DIR)'
@echo ' BUILD_DIR: $(BUILD_DIR)'
@echo ' LOCAL_MIRROR: $(LOCAL_MIRROR)'
@echo ' YUM_REPOS: $(YUM_REPOS)'
@echo ' MIRROR_CENTOS: $(MIRROR_CENTOS)'
@echo ' MIRROR_RHEL: $(MIRROR_RHEL)'
@echo ' MIRROR_EGGS: $(MIRROR_EGGS)'
@echo ' MIRROR_GEMS: $(MIRROR_GEMS)'
@echo ' MIRROR_SRC: $(MIRROR_SRC)'
@echo ' ISO_DIR/ISO_NAME: $(ISO_PATH)'
@echo ' ENV_NAME: $(ENV_NAME)'
@echo ' KSYAML: $(KSYAML)'
@echo
@echo 'Available targets:'
@echo ' all - build product'
@echo ' bootstrap - build bootstrap'
@echo ' iso - build iso image'
@echo ' img - build flash stick image'
@echo ' test - run all tests'
@echo ' test-unit - run unit tests'
@echo ' test-integration - run integration tests'
@echo ' test-integration-env - prepare integration test environment'
@echo ' clean-integration-test - clean integration test environment'
@echo ' clean - remove build directory and reset .done flags'
@echo ' deep_clean - clean + remove $(LOCAL_MIRROR) directory'
@echo ' distclean - deep_clean + clean-integration-test'
@echo
@echo 'To build the system using one of the proprietary mirrors, use '
@echo 'the following commands:'
@echo
@echo 'Saratov office (default):'
@echo 'make iso'
@echo
@echo 'Moscow office:'
@echo 'make iso USE_MIRROR=msk'
@echo
@echo 'Custom location:'
@echo 'make iso YUM_REPOS=proprietary \
MIRROR_CENTOS=http://<your_mirror>/centos \
MIRROR_EGGS=http://<your_mirror>/eggs \
MIRROR_GEMS=http://<your_mirror>/gems \
MIRROR_SRC=http://<your_mirror>/src'
# Path to the sources.
# Default value: directory with Makefile
SOURCE_DIR?=$(dir $(lastword $(MAKEFILE_LIST)))
SOURCE_DIR:=$(abspath $(SOURCE_DIR))
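# Note: $(lastword $(MAKEFILE_LIST)) expands to the path of this Makefile at
# the point where it is read, so SOURCE_DIR resolves to the checkout
# directory no matter which working directory make is invoked from.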
all: iso
test: test-unit test-integration
clean:
sudo rm -rf $(BUILD_DIR)
deep_clean: clean
sudo rm -rf $(LOCAL_MIRROR)
distclean: deep_clean clean-integration-test
# Common configuration file.
include $(SOURCE_DIR)/config.mk
# Macros for make
include $(SOURCE_DIR)/rules.mk
# Sandbox macros.
include $(SOURCE_DIR)/sandbox.mk
# Modules
include $(SOURCE_DIR)/mirror/module.mk
include $(SOURCE_DIR)/packages/module.mk
include $(SOURCE_DIR)/bootstrap/module.mk
include $(SOURCE_DIR)/iso/module.mk
include $(SOURCE_DIR)/fuelweb_test/module.mk

Vagrantfile

@@ -1,80 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
ENVIRONMENT_SETUP_SCRIPT = <<-EOS
#!/bin/bash
# To use this script, you must first fetch the fuel submodule:
#   git submodule update
grep -q devnailgun /etc/hosts || echo "10.0.2.15 devnailgun.mirantis.com devnailgun" >> /etc/hosts
sed 's/HOSTNAME=.*/HOSTNAME=devnailgun.mirantis.com/' -i /etc/sysconfig/network
echo "Installing puppet..."
rpm -Uhv http://fedora-mirror02.rbc.ru/pub/epel/6/i386/epel-release-6-8.noarch.rpm
rpm -ivh http://yum.puppetlabs.com/el/6/products/i386/puppetlabs-release-6-6.noarch.rpm
for pkg in `grep puppet /vagrant/requirements-rpm.txt`; do yum -y install $pkg; done
echo "Configuring puppet..."
grep -q devnailgun /etc/puppet/puppet.conf || echo " server = devnailgun.mirantis.com" >> /etc/puppet/puppet.conf
grep -q autosign /etc/puppet/puppet.conf || echo "\n[master]\n autosign = true" >> /etc/puppet/puppet.conf
chkconfig puppetmaster on; service puppetmaster restart
echo "Use fuel puppet modules to install mcollective&rabbitmq"
rm -f /etc/puppet/modules.old || :
mv /etc/puppet/modules /etc/puppet/modules.old || :
ln -sfT /fuel/deployment/puppet /etc/puppet/modules
mv /etc/puppet/manifests/site.pp /etc/puppet/manifests/site.pp.old || :
cat > /etc/puppet/manifests/site.pp << EOF
node default {
Exec {path => '/usr/bin:/bin:/usr/sbin:/sbin'}
class { mcollective::rabbitmq:
user => "mcollective",
password => "marionette",
}
class { mcollective::client:
pskey => "unset",
user => "mcollective",
password => "marionette",
host => "127.0.0.1",
port => "61613"
}
}
EOF
puppet agent --test
echo "Restoring site.pp and modules to previously set.."
mv /etc/puppet/modules.old /etc/puppet/modules || :
mv /etc/puppet/manifests/site.pp.old /etc/puppet/manifests/site.pp || :
echo "Installing mcollective..."
for pkg in `grep mcollective /vagrant/requirements-rpm.txt`; do yum -y install $pkg; done
chkconfig mcollective on
service mcollective start
# Debug tools
yum -y install strace bind-utils
yum -y install httpd
EOS
Vagrant::Config.run do |config|
config.vm.define :centos63 do |vm_config|
vm_config.vm.box = "centos63"
vm_config.vm.box_url = "http://srv08-srt.srt.mirantis.net/CentOS-6.3-x86_64-minimal.box"
vm_config.vm.customize ["modifyvm", :id, "--memory", 1024]
# Boot with a GUI so you can see the screen. (Default is headless)
#config.vm.boot_mode = :gui
config.vm.share_folder "v-data", "/fuel", "./fuel"
# extra network for testing
vm_config.vm.network :hostonly, '10.1.1.2', :adapter => 2
vm_config.vm.provision :shell, :inline => ENVIRONMENT_SETUP_SCRIPT
end
end
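# Usage sketch (Vagrant::Config.run is the Vagrant 1.x API; the machine name
# below matches the define above):
#
#   vagrant up centos63    # boots the box and runs ENVIRONMENT_SETUP_SCRIPT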

astute

@@ -1 +0,0 @@
Subproject commit 861af62ba2c56a389bda9893e30d2a084c10d746

bootstrap/module.mk

@@ -1,206 +0,0 @@
.PHONY: bootstrap clean clean-bootstrap
INITRAMROOT:=$(BUILD_DIR)/bootstrap/initram-root
BOOTSTRAP_RPMS:=\
bash \
byacc \
cronie-noanacron \
crontabs \
dhclient \
dmidecode \
flex \
gcc \
iputils \
make \
mcollective \
mingetty \
net-tools \
ntp \
openssh-clients \
openssh-server \
rsyslog \
ruby-devel.x86_64 \
rubygems \
scapy \
tcpdump \
vconfig \
vim-minimal \
wget
BOOTSTRAP_RPMS_GARBAGE:=\
byacc \
flex \
gcc \
make \
ruby-devel.x86_64
BOOTSTRAP_RPMS_CUSTOM:=\
nailgun-agent \
nailgun-mcagents \
nailgun-net-check \
dhcp_checker
define yum_local_repo
[mirror]
name=Mirantis mirror
baseurl=file://$(LOCAL_MIRROR_CENTOS_OS_BASEURL)
gpgcheck=0
enabled=1
endef
define bootstrap_yum_conf
[main]
cachedir=$(BUILD_DIR)/bootstrap/cache
keepcache=0
debuglevel=6
logfile=$(BUILD_DIR)/bootstrap/yum.log
exclude=*.i686.rpm
exactarch=1
obsoletes=1
gpgcheck=0
plugins=1
pluginpath=$(BUILD_DIR)/bootstrap/etc/yum-plugins
pluginconfpath=$(BUILD_DIR)/bootstrap/etc/yum/pluginconf.d
reposdir=$(BUILD_DIR)/bootstrap/etc/yum.repos.d
endef
YUM:=sudo yum -c $(BUILD_DIR)/bootstrap/etc/yum.conf --installroot=$(INITRAMROOT) -y --nogpgcheck
clean: clean-bootstrap
clean-bootstrap:
sudo rm -rf $(INITRAMROOT)
bootstrap: $(BUILD_DIR)/bootstrap/build.done
$(BUILD_DIR)/bootstrap/build.done: \
$(BUILD_DIR)/bootstrap/linux \
$(BUILD_DIR)/bootstrap/initramfs.img
$(ACTION.TOUCH)
$(BUILD_DIR)/bootstrap/initramfs.img: \
$(BUILD_DIR)/bootstrap/customize-initram-root.done
sudo sh -c "cd $(INITRAMROOT) && find . -xdev | cpio --create \
--format='newc' | gzip -9 > $(BUILD_DIR)/bootstrap/initramfs.img"
$(BUILD_DIR)/bootstrap/linux: $(BUILD_DIR)/mirror/build.done
mkdir -p $(BUILD_DIR)/bootstrap
find $(LOCAL_MIRROR_CENTOS_OS_BASEURL) -name 'kernel-2.*' | xargs rpm2cpio | \
(cd $(BUILD_DIR)/bootstrap/; cpio -imd './boot/vmlinuz*')
mv $(BUILD_DIR)/bootstrap/boot/vmlinuz* $(BUILD_DIR)/bootstrap/linux
rm -r $(BUILD_DIR)/bootstrap/boot
touch $(BUILD_DIR)/bootstrap/linux
$(BUILD_DIR)/bootstrap/etc/yum.conf: export contents:=$(bootstrap_yum_conf)
$(BUILD_DIR)/bootstrap/etc/yum.repos.d/base.repo: export contents:=$(yum_local_repo)
$(BUILD_DIR)/bootstrap/etc/yum.conf $(BUILD_DIR)/bootstrap/etc/yum.repos.d/base.repo:
mkdir -p $(@D)
/bin/echo -e "$${contents}" > $@
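# Note: the two `export contents:=...` lines above push the multi-line
# define blocks (bootstrap_yum_conf / yum_local_repo) into the recipe's
# environment, and `/bin/echo -e "$${contents}" > $@` writes them out --
# a GNU make idiom for generating small config files without templates.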
$(BUILD_DIR)/bootstrap/customize-initram-root.done: $(call depv,BOOTSTRAP_RPMS_CUSTOM)
$(BUILD_DIR)/bootstrap/customize-initram-root.done: \
$(BUILD_DIR)/packages/rpm/build.done \
$(BUILD_DIR)/bootstrap/prepare-initram-root.done \
$(call find-files,$(SOURCE_DIR)/bootstrap/sync) \
$(SOURCE_DIR)/bin/send2syslog.py \
$(SOURCE_DIR)/bootstrap/ssh/id_rsa.pub \
$(BUILD_DIR)/bootstrap/etc/yum.conf \
$(BUILD_DIR)/bootstrap/etc/yum.repos.d/base.repo
# Rebuilding rpmdb
sudo rpm --root=$(INITRAMROOT) --rebuilddb
# Installing custom rpms
$(YUM) install $(BOOTSTRAP_RPMS_CUSTOM)
# Copying custom files
sudo rsync -aK $(SOURCE_DIR)/bootstrap/sync/ $(INITRAMROOT)
sudo cp -r $(SOURCE_DIR)/bin/send2syslog.py $(INITRAMROOT)/usr/bin
# Enabling pre-init boot interface discovery
sudo chroot $(INITRAMROOT) chkconfig setup-bootdev on
# Setting root password to r00tme
sudo sed -i -e '/^root/c\root:$$6$$oC7haQNQ$$LtVf6AI.QKn9Jb89r83PtQN9fBqpHT9bAFLzy.YVxTLiFgsoqlPY3awKvbuSgtxYHx4RUcpUqMotp.WZ0Hwoj.:15441:0:99999:7:::' $(INITRAMROOT)/etc/shadow
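# (The hash above is a standard SHA-512 crypt hash with `$` escaped as `$$`
# for make; an equivalent hash could be generated with, for example:
# python -c 'import crypt; print crypt.crypt("r00tme", "$6$oC7haQNQ")')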
# Copying rsa key.
sudo mkdir -p $(INITRAMROOT)/root/.ssh
sudo cp $(SOURCE_DIR)/bootstrap/ssh/id_rsa.pub $(INITRAMROOT)/root/.ssh/authorized_keys
sudo chmod 700 $(INITRAMROOT)/root/.ssh
sudo chmod 600 $(INITRAMROOT)/root/.ssh/authorized_keys
# Copying bash init files
sudo cp -f $(INITRAMROOT)/etc/skel/.bash* $(INITRAMROOT)/root/
# Removing garbage
sudo rm -rf $(INITRAMROOT)/home/*
sudo rm -rf \
$(INITRAMROOT)/var/cache/yum \
$(INITRAMROOT)/var/lib/yum \
$(INITRAMROOT)/usr/share/doc \
$(INITRAMROOT)/usr/share/locale
sudo rm -rf $(INITRAMROOT)/tmp/*
$(ACTION.TOUCH)
$(BUILD_DIR)/bootstrap/prepare-initram-root.done: $(call depv,BOOTSTRAP_RPMS)
$(BUILD_DIR)/bootstrap/prepare-initram-root.done: \
$(BUILD_DIR)/mirror/build.done \
$(BUILD_DIR)/bootstrap/etc/yum.conf \
$(BUILD_DIR)/bootstrap/etc/yum.repos.d/base.repo
# Installing centos-release package
sudo rpm -i --root=$(INITRAMROOT) \
`find $(LOCAL_MIRROR_CENTOS_OS_BASEURL) -name "centos-release*rpm" | head -1` || \
echo "centos-release already installed"
# Removing default repositories (centos-release package provides them)
sudo rm -f $(INITRAMROOT)/etc/yum.repos.d/Cent*
# Rebuilding rpmdb
sudo rpm --root=$(INITRAMROOT) --rebuilddb
# Creating some necessary directories
sudo mkdir -p $(INITRAMROOT)/proc
sudo mkdir -p $(INITRAMROOT)/dev
sudo mkdir -p $(INITRAMROOT)/var/lib/rpm
# Installing rpms
$(YUM) install $(BOOTSTRAP_RPMS) $(BOOTSTRAP_RPMS_TEMPORARY)
# Installing gems
sudo mkdir -p $(INITRAMROOT)/tmp/gems
sudo rsync -a --delete $(LOCAL_MIRROR_GEMS)/ $(INITRAMROOT)/tmp/gems
sudo chroot $(INITRAMROOT) gem install httpclient --version 2.2.5 --no-rdoc --no-ri --source file:///tmp/gems
sudo chroot $(INITRAMROOT) gem install ipaddress --version 0.8.0 --no-rdoc --no-ri --source file:///tmp/gems
sudo chroot $(INITRAMROOT) gem install json_pure --version 1.7.5 --no-rdoc --no-ri --source file:///tmp/gems
sudo chroot $(INITRAMROOT) gem install ohai --version 6.14.0 --no-rdoc --no-ri --source file:///tmp/gems
sudo chroot $(INITRAMROOT) gem install rethtool --version 0.0.3 --no-rdoc --no-ri --source file:///tmp/gems
sudo rm -rf \
$(INITRAMROOT)/tmp/gems \
$(INITRAMROOT)/usr/lib/ruby/gems/1.8/cache/*
# Removing temporary rpms (devel packages that were only needed to install gems)
$(YUM) erase $(BOOTSTRAP_RPMS_GARBAGE)
# Disabling mail server (it may have been installed as a dependency)
-sudo chroot $(INITRAMROOT) chkconfig exim off
-sudo chroot $(INITRAMROOT) chkconfig postfix off
# Installing kernel modules
find $(LOCAL_MIRROR_CENTOS_OS_BASEURL) -name 'kernel-2.*' | xargs rpm2cpio | \
( cd $(INITRAMROOT); sudo cpio -idm './lib/modules/*' './boot/vmlinuz*' )
find $(LOCAL_MIRROR_CENTOS_OS_BASEURL) -name 'kernel-firmware-2.*' | xargs rpm2cpio | \
( cd $(INITRAMROOT); sudo cpio -idm './lib/firmware/*' )
for version in `ls -1 $(INITRAMROOT)/lib/modules`; do \
sudo depmod -b $(INITRAMROOT) $$version; \
done
# Some extra actions
sudo touch $(INITRAMROOT)/etc/fstab
sudo cp $(INITRAMROOT)/sbin/init $(INITRAMROOT)/init
$(ACTION.TOUCH)

bootstrap/ssh/id_rsa

@@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA7a1U0jPLRneBPJISZ9oHzf9n8pp1/9GCgBklWU1gzSHGC+c1
05UsK3HJ//BIJpzbCTcbnCUU/qFqqwxmhWnobsJWPyAapudCiVWlK+UGMzlKalaR
SKaF91MBEwsXEgdS4kGGwr6yXKjj0OGbvXzITBimgF9aIczrpp+HM7iw2CLwpz56
FOUQ1re4NRmhRZcxlNyRaKI+/2lhSTKiS4p72st769rxaQbztctvZyrvs4KWZM8y
QnNJ3aFarwflvQ2OkLYE0ln3d2RP/L4OIfEFfB/scai/7lgsQ5L9mN674gB4r0f1
ftWAkkB9DQSazUpEY3fta6C4XBBk+/2BJ24FVQIDAQABAoIBAHbFfXOlqllGcvC/
1i7Lh8brcRiNE5aJLfuxlTZxMoSP8hYUrpNTIkV7kYQyoPuauuJ6BXQcG8e7BkD/
62OULzDaMJtPAcKSIm/aurWat2R1prhJFkUF4kBb3FeV3SuHOWYTdLJw9VTUmTPS
6i7g4n8UenAANlxZuREE+11fWBBJPwmcStwGq9qVn+SydLsSxU0g8cGqd+edYV2p
zBINA88iEkEAQzmK8Pmm9Dqbf5trnSGjqe5tkqxV1CEvb3LcIkxlJgMaWoJNF5ip
Ajl/+WD16xEqbSnr9fAkKXdPbZJW9dPckC6gxeiRx2TXl44XXGX6phv/JUzb+b5n
in+lG0ECgYEA+T6CnPKcKnEO+82Kj14vt+UMZADCDN9i42kodwYJSLlQljiAYRhq
/SmjSNV0A77udpKtN/VFkpssRxFiuazOL4IVJyoQnYNRC0Mov6yexYsejmW7B6xd
YPvPkFPShSMXR16l0Qs0K3sE2QvsNHqzF+mOyriCturF/sjQATrBQgkCgYEA9B6O
xdpfudjh+3/Jx03a4SqQ8r5n6hG56SUJi7dfCpt8Sx94vBsoHThog9u132D47OR2
9xyL3kwhbux1IAn3UpvR07pjKlwSsNlIjeU5lrdWqfxwjf9XLI08/yaeJYDzamsF
EkaxGjxFeXXe2W9pZQ8wCH4kC7K5DI3PrOyhi+0CgYEA2tDGWoOkFp6rSOVqf/Nw
cUHlXuZLdoGnj38Cx3vFKtOGrFtuWc/WHewwLgE3mRJGyFJ9QGNUcSFJ/hqFUf0k
BxFtALUtYRuoSvrrC1vBCCza5qjpShNI55wq3cq5R6lHBqFRcfUkGAgoeWS+OlR+
Zr/lXJQgLfZvXP9vnaS3hgECgYBDiUywehivnsvuO01EgUqDrrYJIho3jI3lbWS1
rZZypc9+LQfG6rBvXRK1IAyx7u8PAqnS9afQaTl6qZpBseAlj4w+SjtzFaPqH0LO
VtNW2gP05Szya+jfH3f3kuR/tawiDdeKVdpbSr9hufVcLFF5Lvl38AXi4qAFVBpX
WePbhQKBgQDl0Y0Nzj4zKb+tD3+Cg8/c0xLGqiyKoX+87c9Dru+aWudfZEjrwi2F
Ui3iN31rrEl+OqwJnC4gR+hWuDuF9gVMb17GoiM/fcvgzes9NL1YWFeKHU8ZKRox
w3nxStHyiETfTyC9LOS1JCfFIMcpYD0lFuHnQHib/HX3dmUvbqo77Q==
-----END RSA PRIVATE KEY-----

bootstrap/ssh/id_rsa.pub

@@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDtrVTSM8tGd4E8khJn2gfN/2fymnX/0YKAGSVZTWDNIcYL5zXTlSwrccn/8EgmnNsJNxucJRT+oWqrDGaFaehuwlY/IBqm50KJVaUr5QYzOUpqVpFIpoX3UwETCxcSB1LiQYbCvrJcqOPQ4Zu9fMhMGKaAX1ohzOumn4czuLDYIvCnPnoU5RDWt7g1GaFFlzGU3JFooj7/aWFJMqJLinvay3vr2vFpBvO1y29nKu+zgpZkzzJCc0ndoVqvB+W9DY6QtgTSWfd3ZE/8vg4h8QV8H+xxqL/uWCxDkv2Y3rviAHivR/V+1YCSQH0NBJrNSkRjd+1roLhcEGT7/YEnbgVV nailgun@bootstrap

@@ -1,92 +0,0 @@
#! /bin/bash
#
# setup-bootdev Set up the necessary network interfaces for DHCP
#
# chkconfig: 2345 05 90
# description: Set up the necessary network interfaces for DHCP.
#
### BEGIN INIT INFO
# Provides: $(setup-bootdev)
# Short-Description: Set up the necessary network interfaces for DHCP
# Description: Set up the necessary network interfaces for DHCP
### END INIT INFO
# Source function library.
. /etc/init.d/functions
cd /etc/sysconfig/network-scripts
. ./network-functions
get_bootdev() {
bootaddr=$(ruby -e \
'File.open("/proc/cmdline") { |fo|
l = fo.read
m = l.match( /BOOTIF=([0-9A-Fa-f]{2}(-[0-9A-Fa-f]{2})+)/ )
print m[1].split("-")[-6..-1].join(":")
}' 2> /dev/null)
if [ -z "$bootaddr" ]; then
return 1
fi
dev=$(get_device_by_hwaddr $bootaddr)
if [ -z "$dev" ]; then
return 2
fi
echo $dev
}
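# Note: get_bootdev relies on the BOOTIF parameter that pxelinux appends to
# the kernel command line when IPAPPEND 2 is set: BOOTIF=01-<mac-with-dashes>,
# where the leading "01" is the ARP hardware type. For example,
# BOOTIF=01-52-54-00-12-34-56 yields MAC 52:54:00:12:34:56, which
# get_device_by_hwaddr then maps to an interface name.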
get_alldevs() {
ip link|grep -oE 'eth[0-9]+'|sort|uniq
}
set_interfaces_up_when_booted() {
devs=$(get_alldevs)
for name in $devs; do
content="DEVICE=$name\nBOOTPROTO=none\nONBOOT=yes"
echo -e "$content" > "./ifcfg-$name"
done
}
# See how we were called.
case "$1" in
start)
echo -n "Configure all interfaces as active..."
set_interfaces_up_when_booted
echo "ok."
echo -n "Obtain boot interface name..."
dev=$(get_bootdev)
rc=$?
if [ $rc -ne 0 ]; then
echo "failed."
echo -en "Obtain all eth interfaces..."
dev=$(get_alldevs)
if [ -z "$dev" ]; then
rc=1
echo "failed."
else
rc=0
dev_str=$(echo "$dev"|tr "\n" " "|sed 's/ *$//')
echo "ok ($dev_str)."
fi
else
echo "ok ($dev)."
fi
if [ $rc -eq 0 ]; then
for name in $dev; do
content="DEVICE=$name\nBOOTPROTO=dhcp\nONBOOT=yes"
echo -e "$content" > "./ifcfg-$name"
done
fi
action $"Update network interfaces settings: " [ $rc -eq 0 ]
;;
*)
rc=0
;;
esac
exit $rc

@@ -1,9 +0,0 @@
# tty - getty
#
# This service maintains a getty on the specified device.
stop on runlevel [S016]
respawn
instance $TTY
exec /sbin/mingetty --noclear $TTY

Binary file not shown.

@@ -1,29 +0,0 @@
main_collective = mcollective
collectives = mcollective
libdir = /usr/libexec/mcollective
logfile = /var/log/mcollective.log
loglevel = debug
direct_addressing = 1
daemonize = 1
# Set a huge ttl to avoid problems with unsynchronized time between nodes
# bash$ date -d '2033-5-18 3:33:20 UTC' +%s
# 2000000000
# That makes the ttl roughly 63.4 years.
ttl = 2000000000
# Plugins
securityprovider = psk
plugin.psk = unset
connector = rabbitmq
plugin.rabbitmq.vhost = mcollective
plugin.rabbitmq.pool.size = 1
plugin.rabbitmq.pool.1.host =
plugin.rabbitmq.pool.1.port = 61613
plugin.rabbitmq.pool.1.user = mcollective
plugin.rabbitmq.pool.1.password = marionette
# Facts
factsource = yaml
plugin.yaml = /etc/mcollective/facts.yaml

@@ -1 +0,0 @@
bootstrap

@@ -1,8 +0,0 @@
#!/bin/sh -e
mount -t devpts devpts /dev/pts
fix-configs-on-startup || true
flock -w 0 -o /var/lock/agent.lock -c "/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1" || true
touch /var/lock/subsys/local

@@ -1,6 +0,0 @@
# Log all messages with this template
$template CustomLog, "%$NOW%T%TIMESTAMP:8:$%Z %syslogseverity-text% %syslogtag% %msg%\n"
$ActionFileDefaultTemplate CustomLog
user.debug /var/log/messages

@@ -1,16 +0,0 @@
Protocol 2
SyslogFacility AUTHPRIV
PasswordAuthentication no
PubkeyAuthentication yes
ChallengeResponseAuthentication no
GSSAPIAuthentication no
UsePAM no
UseDNS no
# Accept locale-related environment variables
AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
AcceptEnv XMODIFIERS
Subsystem sftp /usr/libexec/openssh/sftp-server

@@ -1 +0,0 @@
ZONE="UTC"

@@ -1,3 +0,0 @@
HOSTNAME=bootstrap
NETWORKING=yes
VLAN=yes

@@ -1,54 +0,0 @@
#!/bin/sh
masternode_ip=`sed -rn 's/^.*url=http:\/\/(([0-9]{1,3}\.){3}[0-9]{1,3}).*$/\1/ p' /proc/cmdline`
# Send logs to master node.
cat > /etc/send2syslog.conf <<EOF
{
"watchlist": [
{"servers": [ {"host": "$masternode_ip"} ],
"watchfiles": [
{"tag": "bootstrap/dmesg", "files": ["/var/log/dmesg"]},
{"tag": "bootstrap/secure", "files": ["/var/log/secure"]},
{"tag": "bootstrap/messages", "files": ["/var/log/messages"]},
{"tag": "bootstrap/mcollective", "log_type": "ruby",
"files": ["/var/log/mcollective.log"]},
{"tag": "bootstrap/agent", "log_type": "ruby",
"files": ["/var/log/nailgun-agent.log"]}
]
}
]
}
EOF
/usr/bin/send2syslog.py -i < /etc/send2syslog.conf
#
# Set up NTP
#
# Disable panic about huge clock offset
#
sed -i '/^\s*tinker panic/ d' /etc/ntp.conf
sed -i '1 i tinker panic 0' /etc/ntp.conf
# Create default drift file
#
mkdir -p /var/lib/ntp
chown ntp: /var/lib/ntp
echo 0 > /var/lib/ntp/drift
chown ntp: /var/lib/ntp/drift
# Sync clock with master node
#
sed -i "/^\s*server\b/ d" /etc/ntp.conf
echo "server $masternode_ip burst iburst" >> /etc/ntp.conf
# Add some additional logging to the ntpdate init script.
sed -i '46 a logger -t ntpdate-log -- `echo /usr/sbin/ntpdate $OPTIONS $tickers` || true\
/usr/sbin/ntpdate $OPTIONS -d $tickers &> /dev/null' /etc/init.d/ntpdate
/etc/init.d/ntpdate start
/etc/init.d/ntpd start
#
# Update mcollective config
#
sed -i "s/^plugin.rabbitmq.pool.1.host\b.*$/plugin.rabbitmq.pool.1.host = $masternode_ip/" /etc/mcollective/server.cfg
service mcollective restart

config.mk

@@ -1,164 +0,0 @@
#
# Build directives. Can be overridden by environment variables.
#
# Base path for build and mirror directories.
# Default value: current directory
TOP_DIR?=$(PWD)
TOP_DIR:=$(abspath $(TOP_DIR))
# Path for build artifacts
BUILD_DIR?=$(TOP_DIR)/build
BUILD_DIR:=$(abspath $(BUILD_DIR))
# Path for cache of downloaded packages
LOCAL_MIRROR?=$(TOP_DIR)/local_mirror
LOCAL_MIRROR:=$(abspath $(LOCAL_MIRROR))
MASTER_IP?=10.20.0.2
MASTER_DNS?=10.20.0.1
MASTER_NETMASK?=255.255.255.0
MASTER_GW?=10.20.0.1
COMMIT_SHA:=$(shell git rev-parse --verify HEAD)
PRODUCT_VERSION:=3.2
FUEL_COMMIT_SHA:=$(shell cd fuel && git rev-parse --verify HEAD)
CENTOS_MAJOR:=6
CENTOS_MINOR:=4
CENTOS_RELEASE:=$(CENTOS_MAJOR).$(CENTOS_MINOR)
CENTOS_ARCH:=x86_64
UBUNTU_RELEASE:=precise
ISO_NAME?=fuelweb-centos-$(CENTOS_RELEASE)-$(CENTOS_ARCH)
ISO_DIR?=$(BUILD_DIR)/iso
ISO_DIR:=$(abspath $(ISO_DIR))
ISO_PATH:=$(ISO_DIR)/$(ISO_NAME).iso
IMG_PATH:=$(ISO_DIR)/$(ISO_NAME).img
# Do not compress javascript and css files
NO_UI_OPTIMIZE:=0
# Do not copy RHEL repo to the iso
CACHE_RHEL:=0
LOCAL_MIRROR_SRC:=$(LOCAL_MIRROR)/src
LOCAL_MIRROR_EGGS:=$(LOCAL_MIRROR)/eggs
LOCAL_MIRROR_GEMS:=$(LOCAL_MIRROR)/gems
LOCAL_MIRROR_CENTOS:=$(LOCAL_MIRROR)/centos
LOCAL_MIRROR_CENTOS_OS_BASEURL:=$(LOCAL_MIRROR_CENTOS)/os/$(CENTOS_ARCH)
LOCAL_MIRROR_UBUNTU:=$(LOCAL_MIRROR)/ubuntu
LOCAL_MIRROR_UBUNTU_OS_BASEURL:=$(LOCAL_MIRROR_UBUNTU)/main
LOCAL_MIRROR_RHEL:=$(LOCAL_MIRROR)/rhel
BUILD_MIRROR_GEMS:=$(BUILD_DIR)/packages/gems
# Use the download.mirantis.com mirror by default. Other possible values are
# 'msk', 'srt', 'usa'.
# Setting any other value or removing this variable causes all packages
# to be downloaded directly from the internet.
USE_MIRROR?=ext
ifeq ($(USE_MIRROR),ext)
YUM_REPOS?=proprietary
MIRROR_BASE?=http://download.mirantis.com/fuelweb-repo/$(PRODUCT_VERSION)
MIRROR_CENTOS?=$(MIRROR_BASE)/centos
MIRROR_UBUNTU?=$(MIRROR_BASE)/ubuntu
MIRROR_EGGS?=$(MIRROR_BASE)/eggs
MIRROR_GEMS?=$(MIRROR_BASE)/gems
MIRROR_SRC?=$(MIRROR_BASE)/src
endif
ifeq ($(USE_MIRROR),srt)
YUM_REPOS?=proprietary
MIRROR_BASE?=http://srv08-srt.srt.mirantis.net/fwm/$(PRODUCT_VERSION)
MIRROR_CENTOS?=$(MIRROR_BASE)/centos
MIRROR_UBUNTU?=$(MIRROR_BASE)/ubuntu
MIRROR_EGGS?=$(MIRROR_BASE)/eggs
MIRROR_GEMS?=$(MIRROR_BASE)/gems
MIRROR_SRC?=$(MIRROR_BASE)/src
endif
ifeq ($(USE_MIRROR),msk)
YUM_REPOS?=proprietary
MIRROR_BASE?=http://srv11-msk.msk.mirantis.net/fwm/$(PRODUCT_VERSION)
MIRROR_CENTOS?=$(MIRROR_BASE)/centos
MIRROR_UBUNTU?=$(MIRROR_BASE)/ubuntu
MIRROR_EGGS?=$(MIRROR_BASE)/eggs
MIRROR_GEMS?=$(MIRROR_BASE)/gems
MIRROR_SRC?=$(MIRROR_BASE)/src
endif
ifeq ($(USE_MIRROR),usa)
YUM_REPOS?=proprietary
MIRROR_BASE?=http://product-vm.vm.mirantis.net/fwm/$(PRODUCT_VERSION)
MIRROR_CENTOS?=$(MIRROR_BASE)/centos
MIRROR_UBUNTU?=$(MIRROR_BASE)/ubuntu
MIRROR_EGGS?=$(MIRROR_BASE)/eggs
MIRROR_GEMS?=$(MIRROR_BASE)/gems
MIRROR_SRC?=$(MIRROR_BASE)/src
endif
#
# OSCI team requirement: build an iso with our srv08 mirror,
# but use their repo for fuel packages. This section is a quick
# way to implement that.
# Limitation: the osci repo is mixed with the srv08 mirror.
# If a package is missing from the osci repo, it is taken from srv08.
# If a package has the same version in both the osci and srv08 repos,
# either copy may be picked at random.
#
ifeq ($(USE_MIRROR),osci)
YUM_REPOS?=proprietary fuel
MIRROR_FUEL?=http://download.mirantis.com/epel-fuel-grizzly-3.2/
MIRROR_FUEL_UBUNTU?=http://download.mirantis.com/epel-fuel-grizzly-3.2/
MIRROR_BASE?=http://srv08-srt.srt.mirantis.net/fwm/$(PRODUCT_VERSION)
MIRROR_CENTOS?=$(MIRROR_BASE)/centos
MIRROR_UBUNTU?=$(MIRROR_BASE)/ubuntu
MIRROR_EGGS?=$(MIRROR_BASE)/eggs
MIRROR_GEMS?=$(MIRROR_BASE)/gems
MIRROR_SRC?=$(MIRROR_BASE)/src
endif
MIRROR_CENTOS?=http://mirror.yandex.ru/centos/$(CENTOS_RELEASE)
MIRROR_CENTOS_OS_BASEURL:=$(MIRROR_CENTOS)/os/$(CENTOS_ARCH)
MIRROR_UBUNTU?=http://mirror.yandex.ru/ubuntu/dists/$(UBUNTU_RELEASE)
MIRROR_UBUNTU_OS_BASEURL:=$(MIRROR_UBUNTU)/main
MIRROR_RHEL?=http://srv11-msk.msk.mirantis.net/rhel6/rhel-6-server-rpms
MIRROR_RHEL_BOOT?=http://srv11-msk.msk.mirantis.net/rhel6/rhel-server-6.4-x86_64
# The MIRROR_FUEL option is valid only for the 'fuel' YUM_REPOS section
# and is ignored in other cases
MIRROR_FUEL?=http://172.18.165.40:82/centos-fuel-3.2-testing/centos/
MIRROR_FUEL_UBUNTU?=http://172.18.165.40:82/ubuntu-fuel-3.2-testing/reprepro/
# It can be either a list of links (--find-links) or a pip index (--index-url).
MIRROR_EGGS?=http://pypi.python.org/simple
# NOTE(mihgen): removed gemcutter - it redirects to rubygems.org and has issues w/certificate now
MIRROR_GEMS?=http://rubygems.org
REQUIRED_RPMS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/requirements-rpm.txt)
REQUIRED_DEBS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/requirements-deb.txt)
REQUIRED_EGGS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/requirements-eggs.txt)
OSTF_EGGS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/fuel/deployment/puppet/nailgun/files/venv-ostf.txt)
REQUIRED_SRCS:=$(shell grep -v ^\\s*\# $(SOURCE_DIR)/requirements-src.txt)
REQ_RHEL_RPMS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/fuel/deployment/puppet/rpmcache/files/required-rpms.txt)
REQ_FUEL_RHEL_RPMS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/fuel/deployment/puppet/rpmcache/files/req-fuel-rhel.txt)
OSTF_PLUGIN_SHA?=f1c7870793a3aa724673c30391d3255a0d9465d5
OSTF_PLUGIN_VER?=0.2
OSTF_TESTS_SHA?=92b4e5e8d10f1a45f7433d06eb3a5936adb4050e
OSTF_TESTS_VER?=0.1
# Which repositories to use for making the local centos mirror.
# Possible values can be found in the mirror/centos/yum_repos.mk file.
# The actual name is constructed by prepending the "yum_repo_" prefix.
# Example: YUM_REPOS?=official epel => yum_repo_official and yum_repo_epel
# will be used.
YUM_REPOS?=official fuel subscr_manager
ifeq ($(CACHE_RHEL),1)
YUM_REPOS:=$(YUM_REPOS) rhel
endif
# Mirror of source packages. Bareword 'internet' is used to download packages
# directly from the internet
MIRROR_SRC?=internet
# INTEGRATION TEST CONFIG
NOFORWARD:=1
# Path to yaml configuration file to build ISO ks.cfg
KSYAML?=$(SOURCE_DIR)/iso/ks.yaml

fuel

@@ -1 +0,0 @@
Subproject commit e79dd4a6b93c88040bb513efb73126d0112c3eaa

fuelweb_test/__init__.py

@@ -1,13 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

fuelweb_test/helpers.py

@@ -1,183 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import subprocess
import urllib2
import logging
import json
import threading
import select
from fuelweb_test.integration.decorators import debug
logger = logging.getLogger(__name__)
logwrap = debug(logger)
"""
Integration test helpers
"""
class HTTPClient(object):
def __init__(self, url=""):
self.url = url
self.opener = urllib2.build_opener(urllib2.HTTPHandler)
def get(self, endpoint):
req = urllib2.Request(self.url + endpoint)
return self._open(req)
def post(self, endpoint, data=None, content_type="application/json"):
if not data:
data = {}
req = urllib2.Request(self.url + endpoint, data=json.dumps(data))
req.add_header('Content-Type', content_type)
return self._open(req)
def put(self, endpoint, data=None, content_type="application/json"):
if not data:
data = {}
req = urllib2.Request(self.url + endpoint, data=json.dumps(data))
req.add_header('Content-Type', content_type)
req.get_method = lambda: 'PUT'
return self._open(req)
def delete(self, endpoint):
req = urllib2.Request(self.url + endpoint)
req.get_method = lambda: 'DELETE'
return self._open(req)
def _open(self, req):
return self.opener.open(req)
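# Minimal usage sketch (values are illustrative; 10.20.0.2 is MASTER_IP and
# port 8000 is the nailgun API port used elsewhere in this commit):
#
#   client = HTTPClient(url="http://10.20.0.2:8000")
#   nodes = json.loads(client.get("/api/nodes").read())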
class LogServer(threading.Thread):
@logwrap
def __init__(self, address="localhost", port=5514):
super(LogServer, self).__init__()
self.socket = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM
)
self.socket.bind((str(address), port))
self.rlist = [self.socket]
self._stop = threading.Event()
self._handler = self.handler
self._status = False
def handler(self, messages):
pass
def set_status(self, status):
self._status = status
def get_status(self):
return self._status
def set_handler(self, handler):
self._handler = handler
@logwrap
def stop(self):
self.socket.close()
self._stop.set()
def started(self):
return not self._stop.is_set()
def rude_join(self, timeout=None):
self._stop.set()
super(LogServer, self).join(timeout)
def join(self, timeout=None):
self.rude_join(timeout)
@logwrap
def run(self):
while self.started():
r, w, e = select.select(self.rlist, [], [], 1)
if self.socket in r:
message, addr = self.socket.recvfrom(2048)
self._handler(message)
class TriggeredLogServer(LogServer):
def __init__(self, address="localhost", port=5514):
super(TriggeredLogServer, self).__init__(address, port)
self.set_handler(self.handler)
def handler(self, message):
self.set_status(True)
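# Usage sketch: basic_provisioning() elsewhere in this commit registers the
# host as the cluster's syslog server on port 5514, so a test can assert
# that log traffic actually arrives:
#
#   server = TriggeredLogServer(port=5514)
#   server.start()               # select() loop runs in a background thread
#   # ...deploy and trigger remote logging...
#   assert server.get_status()   # set by handler() on the first datagram
#   server.stop()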
class Ebtables(object):
def __init__(self, target_devs, vlans):
super(Ebtables, self).__init__()
self.target_devs = target_devs
self.vlans = vlans
@logwrap
def restore_vlans(self):
for vlan in self.vlans:
for target_dev in self.target_devs:
Ebtables.restore_vlan(target_dev, vlan)
@logwrap
def restore_first_vlan(self):
for target_dev in self.target_devs:
Ebtables.restore_vlan(target_dev, self.vlans[0])
@logwrap
def block_first_vlan(self):
for target_dev in self.target_devs:
Ebtables.block_vlan(target_dev, self.vlans[0])
@staticmethod
@logwrap
def block_mac(mac):
return subprocess.check_output(
['sudo', 'ebtables', '-t', 'filter', '-A', 'FORWARD', '-s',
mac, '-j', 'DROP'],
stderr=subprocess.STDOUT
)
@staticmethod
@logwrap
def restore_mac(mac):
return subprocess.call(
[
'sudo', 'ebtables', '-t', 'filter',
'-D', 'FORWARD', '-s', mac, '-j', 'DROP'
],
stderr=subprocess.STDOUT,
)
@staticmethod
@logwrap
def restore_vlan(target_dev, vlan):
return subprocess.call(
['sudo', 'ebtables', '-t', 'broute', '-D', 'BROUTING', '-i',
target_dev, '-p', '8021Q', '--vlan-id', str(vlan), '-j', 'DROP'],
stderr=subprocess.STDOUT,
)
@staticmethod
@logwrap
def block_vlan(target_dev, vlan):
return subprocess.check_output(
['sudo', 'ebtables', '-t', 'broute', '-A', 'BROUTING', '-i',
target_dev, '-p', '8021Q', '--vlan-id', str(vlan), '-j', 'DROP'],
stderr=subprocess.STDOUT
)
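# Failure-injection sketch (device and vlan values are illustrative):
# block_vlan() installs an ebtables BROUTING rule that drops 802.1Q frames
# for the given vlan on a host-side tap device, simulating a broken network
# segment during network verification; restore_vlan() removes the rule.
#
#   ebt = Ebtables(target_devs=['vnet0'], vlans=[103])
#   ebt.block_first_vlan()
#   # ...run network verification, expect it to report the broken vlan...
#   ebt.restore_first_vlan()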

fuelweb_test/integration/__init__.py

@@ -1,13 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

fuelweb_test/integration/base_node_test_case.py

@@ -1,487 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from devops.helpers.helpers import SSHClient, wait, _wait
from paramiko import RSAKey
import re
import hashlib
from fuelweb_test.helpers import Ebtables
from fuelweb_test.integration.base_test_case import BaseTestCase
from fuelweb_test.integration.decorators import debug
from fuelweb_test.nailgun_client import NailgunClient
from fuelweb_test.settings import CLEAN, NETWORK_MANAGERS, EMPTY_SNAPSHOT, \
REDHAT_USERNAME, REDHAT_PASSWORD, REDHAT_SATELLITE_HOST, \
REDHAT_ACTIVATION_KEY, OPENSTACK_RELEASE, OPENSTACK_RELEASE_REDHAT, \
REDHAT_LICENSE_TYPE, READY_SNAPSHOT
logger = logging.getLogger(__name__)
logwrap = debug(logger)
class BaseNodeTestCase(BaseTestCase):
environment_states = {}
def setUp(self):
self.client = NailgunClient(self.get_admin_node_ip())
@logwrap
def get_interface_description(self, ctrl_ssh, interface_short_name):
return ''.join(
ctrl_ssh.execute(
'/sbin/ip addr show dev %s' % interface_short_name
)['stdout']
)
def assertNetworkConfiguration(self, node):
remote = SSHClient(node['ip'], username='root', password='r00tme',
private_keys=self.get_private_keys())
for interface in node['network_data']:
if interface.get('vlan') is None:
continue  # TODO: excess check; fix interface json format
interface_name = "%s.%s@%s" % (
interface['dev'], interface['vlan'], interface['dev'])
interface_short_name = "%s.%s" % (
interface['dev'], interface['vlan'])
interface_description = self.get_interface_description(
remote, interface_short_name)
self.assertIn(interface_name, interface_description)
if interface.get('name') == 'floating':
continue
if interface.get('ip'):
self.assertIn("inet %s" % interface.get('ip'),
interface_description)
else:
self.assertNotIn("inet ", interface_description)
if interface.get('brd'):
self.assertIn("brd %s" % interface['brd'],
interface_description)
@logwrap
def is_node_discovered(self, nailgun_node):
return any(
map(lambda node: node['mac'] == nailgun_node['mac']
and node['status'] == 'discover', self.client.list_nodes()))
@logwrap
def get_target_devs(self, devops_nodes):
return [
interface.target_dev for interface in [
val for var in map(lambda node: node.interfaces, devops_nodes)
for val in var]]
@logwrap
def get_ebtables(self, cluster_id, devops_nodes):
return Ebtables(
self.get_target_devs(devops_nodes),
self.client._get_cluster_vlans(cluster_id))
@logwrap
def _get_common_vlan(self, cluster_id):
"""Find vlan that must be at all two nodes.
"""
return self.client.get_networks(
cluster_id)['networks'][0]['vlan_start']
@logwrap
def _run_network_verify(self, cluster_id):
return self.client.verify_networks(
cluster_id, self.client.get_networks(cluster_id)['networks'])
@logwrap
def check_role_file(self, nodes_dict):
for node, roles in self.get_nailgun_node_roles(nodes_dict):
remote = SSHClient(
node['ip'], username='root', password='r00tme',
private_keys=self.get_private_keys())
for role in roles:
if role != "cinder":
self.assertTrue(remote.isfile('/tmp/%s-file' % role))
@logwrap
def clean_clusters(self):
self.client.clean_clusters()
@logwrap
def configure_cluster(self, cluster_id, nodes_dict):
self.update_nodes(cluster_id, nodes_dict, True, False)
# TODO: update network configuration
@logwrap
def basic_provisioning(self, cluster_id, nodes_dict, port=5514):
self.client.add_syslog_server(
cluster_id, self.ci().get_host_node_ip(), port)
self.bootstrap_nodes(self.devops_nodes_by_names(nodes_dict.keys()))
self.configure_cluster(cluster_id, nodes_dict)
task = self.deploy_cluster(cluster_id)
self.assertTaskSuccess(task)
self.check_role_file(nodes_dict)
return cluster_id
@logwrap
def prepare_environment(self, name='cluster_name', mode="multinode",
settings=None):
if not(self.ci().revert_to_state(settings)):
self.get_ready_environment()
if settings is None:
return None
if 'nodes' in settings:
cluster_id = self.create_cluster(name=name, mode=mode)
self.basic_provisioning(cluster_id, settings['nodes'])
self.ci().snapshot_state(name, settings)
# return id of last created cluster
clusters = self.client.list_clusters()
if len(clusters) > 0:
return clusters.pop()['id']
return None
@logwrap
def get_nailgun_node_roles(self, nodes_dict):
nailgun_node_roles = []
for node_name in nodes_dict:
slave = self.ci().environment().node_by_name(node_name)
node = self.get_node_by_devops_node(slave)
nailgun_node_roles.append((node, nodes_dict[node_name]))
return nailgun_node_roles
@logwrap
def deploy_cluster(self, cluster_id):
"""Return hash with task description."""
return self.client.deploy_cluster_changes(cluster_id)
@logwrap
def assertTaskSuccess(self, task, timeout=90 * 60):
self.assertEquals('ready', self._task_wait(task, timeout)['status'])
@logwrap
def assertTaskFailed(self, task, timeout=70 * 60):
self.assertEquals('error', self._task_wait(task, timeout)['status'])
@logwrap
def assertOSTFRunSuccess(self, cluster_id, should_fail=0, should_pass=0,
timeout=10 * 60):
set_result_list = self._ostf_test_wait(cluster_id, timeout)
passed = 0
failed = 0
for set_result in set_result_list:
passed += len(filter(lambda test: test['status'] == 'success',
set_result['tests']))
failed += len(
filter(
lambda test: test['status'] == 'failure' or
test['status'] == 'error',
set_result['tests']
)
)
self.assertEqual(passed, should_pass, 'Passed tests')
self.assertEqual(failed, should_fail, 'Failed tests')
@logwrap
def run_OSTF(self, cluster_id, test_sets=None,
should_fail=0, should_pass=0):
test_sets = test_sets \
if test_sets is not None \
else ['fuel_smoke', 'fuel_sanity']
self.client.ostf_run_tests(cluster_id, test_sets)
self.assertOSTFRunSuccess(cluster_id, should_fail=should_fail,
should_pass=should_pass)
@logwrap
def _task_wait(self, task, timeout):
wait(
lambda: self.client.get_task(
task['id'])['status'] != 'running',
timeout=timeout)
return self.client.get_task(task['id'])
@logwrap
def _ostf_test_wait(self, cluster_id, timeout):
wait(
lambda: all([run['status'] == 'finished'
for run in
self.client.get_ostf_test_run(cluster_id)]),
timeout=timeout)
return self.client.get_ostf_test_run(cluster_id)
@logwrap
def _tasks_wait(self, tasks, timeout):
return [self._task_wait(task, timeout) for task in tasks]
@logwrap
def _upload_sample_release(self):
release_id = self.client.get_release_id()
if not release_id:
raise Exception("Not implemented uploading of release")
return release_id
@logwrap
def get_or_create_cluster(self, name, release_id, mode="multinode"):
if not release_id:
release_id = self._upload_sample_release()
cluster_id = self.client.get_cluster_id(name)
if not cluster_id:
self.client.create_cluster(
data={
"name": name,
"release": str(release_id),
"mode": mode
}
)
cluster_id = self.client.get_cluster_id(name)
if not cluster_id:
raise Exception("Could not get cluster '%s'" % name)
return cluster_id
@logwrap
def create_cluster(self, name='default', release_id=None,
mode="multinode"):
"""
:param name:
:param release_id:
:param mode:
:return: cluster_id
"""
return self.get_or_create_cluster(name, release_id, mode)
@logwrap
def update_nodes(self, cluster_id, nodes_dict,
pending_addition=True, pending_deletion=False):
# update nodes in cluster
nodes_data = []
for node_name in nodes_dict:
devops_node = self.ci().environment().node_by_name(node_name)
node = self.get_node_by_devops_node(devops_node)
node_data = {'cluster_id': cluster_id, 'id': node['id'],
'pending_addition': pending_addition,
'pending_deletion': pending_deletion,
'pending_roles': nodes_dict[node_name]}
nodes_data.append(node_data)
# assume nodes are going to be updated for one cluster only
cluster_id = nodes_data[-1]['cluster_id']
node_ids = [str(node_info['id']) for node_info in nodes_data]
self.client.update_nodes(nodes_data)
nailgun_nodes = self.client.list_cluster_nodes(cluster_id)
cluster_node_ids = map(lambda node: str(node['id']), nailgun_nodes)
self.assertTrue(
all([node_id in cluster_node_ids for node_id in node_ids]))
return nailgun_nodes
@logwrap
def get_node_by_devops_node(self, devops_node):
"""Returns dict with nailgun slave node description if node is
registered. Otherwise return None.
"""
mac_addresses = map(
lambda interface: interface.mac_address.capitalize(),
devops_node.interfaces)
for nailgun_node in self.client.list_nodes():
if nailgun_node['mac'].capitalize() in mac_addresses:
nailgun_node['devops_name'] = devops_node.name
return nailgun_node
return None
def nailgun_nodes(self, devops_nodes):
return map(lambda node: self.get_node_by_devops_node(node),
devops_nodes)
def devops_nodes_by_names(self, devops_node_names):
return map(lambda name: self.ci().environment().node_by_name(name),
devops_node_names)
@logwrap
def bootstrap_nodes(self, devops_nodes, timeout=600):
"""Start vms and wait they are registered on nailgun.
:rtype : List of registred nailgun nodes
"""
for node in devops_nodes:
node.start()
wait(lambda: all(self.nailgun_nodes(devops_nodes)), 15, timeout)
return self.nailgun_nodes(devops_nodes)
@logwrap
def assert_service_list(self, remote, smiles_count):
ret = remote.check_call('/usr/bin/nova-manage service list')
self.assertEqual(
smiles_count, ''.join(ret['stdout']).count(":-)"), "Smiles count")
self.assertEqual(
0, ''.join(ret['stdout']).count("XXX"), "Broken services count")
@logwrap
def assert_node_service_list(self, node_name, smiles_count):
ip = self.get_node_by_devops_node(
self.ci().environment().node_by_name(node_name))['ip']
remote = SSHClient(ip, username='root', password='r00tme',
private_keys=self.get_private_keys())
return self.assert_service_list(remote, smiles_count)
@logwrap
def assert_glance_index(self, ctrl_ssh):
ret = ctrl_ssh.check_call('. /root/openrc; glance index')
self.assertEqual(1, ''.join(ret['stdout']).count("TestVM"))
@logwrap
def assert_network_list(self, networks_count, remote):
ret = remote.check_call('/usr/bin/nova-manage network list')
self.assertEqual(networks_count + 1, len(ret['stdout']))
@logwrap
def assertClusterReady(self, node_name, smiles_count,
networks_count=1, timeout=300):
_wait(
lambda: self.get_cluster_status(
self.get_node_by_devops_node(
self.ci().environment().node_by_name(node_name))['ip'],
smiles_count=smiles_count,
networks_count=networks_count),
timeout=timeout)
@logwrap
def _get_remote(self, ip):
return SSHClient(ip, username='root', password='r00tme',
private_keys=self.get_private_keys())
@logwrap
def _get_remote_for_node(self, node_name):
ip = self.get_node_by_devops_node(
self.ci().environment().node_by_name(node_name))['ip']
return self._get_remote(ip)
@logwrap
def get_cluster_status(self, ip, smiles_count, networks_count=1):
remote = self._get_remote(ip)
self.assert_service_list(remote, smiles_count)
self.assert_glance_index(remote)
self.assert_network_list(networks_count, remote)
@logwrap
def get_cluster_floating_list(self, node_name):
remote = self._get_remote_for_node(node_name)
ret = remote.check_call('/usr/bin/nova-manage floating list')
ret_str = ''.join(ret['stdout'])
return re.findall('(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', ret_str)
@logwrap
def get_cluster_block_devices(self, node_name):
remote = self._get_remote_for_node(node_name)
ret = remote.check_call('/bin/lsblk')
return ''.join(ret['stdout'])
@logwrap
def assert_cluster_floating_list(self, node_name, expected_ips):
current_ips = self.get_cluster_floating_list(node_name)
self.assertEqual(set(expected_ips), set(current_ips))
@logwrap
def get_private_keys(self):
keys = []
for key_string in ['/root/.ssh/id_rsa', '/root/.ssh/bootstrap.rsa']:
with self.remote().open(key_string) as f:
keys.append(RSAKey.from_private_key(f))
return keys
@logwrap
def update_node_networks(self, node_id, interfaces_dict):
interfaces = self.client.get_node_interfaces(node_id)
for interface in interfaces:
interface_name = interface['name']
interface['assigned_networks'] = []
for allowed_network in interface['allowed_networks']:
key_exists = interface_name in interfaces_dict
if key_exists and \
allowed_network['name'] \
in interfaces_dict[interface_name]:
interface['assigned_networks'].append(allowed_network)
self.client.put_node_interfaces(
[{'id': node_id, 'interfaces': interfaces}])
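# interfaces_dict maps interface names to the network names to assign, e.g.
# (names are illustrative):
#
#   self.update_node_networks(node['id'],
#                             {'eth0': ['fixed'], 'eth1': ['floating']})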
@logwrap
def update_vlan_network_fixed(
self, cluster_id, amount=1, network_size=256):
network_list = self.client.get_networks(cluster_id)['networks']
for network in network_list:
if network["name"] == 'fixed':
network['amount'] = amount
network['network_size'] = network_size
self.client.update_network(
cluster_id,
networks=network_list,
net_manager=NETWORK_MANAGERS['vlan'])
@logwrap
def get_ready_environment(self):
if self.ci().get_state(READY_SNAPSHOT):
self.environment().resume(verbose=False)
return
self.ci().get_empty_environment()
if OPENSTACK_RELEASE == OPENSTACK_RELEASE_REDHAT:
# update redhat credentials so that fuel may upload redhat
# packages
# download redhat repo from local place to boost the test
# remote = self.nodes().admin.remote(
# 'internal', 'root', 'r00tme')
# remote.execute(
# 'wget -q http://172.18.67.168/rhel6/rhel-rpms.tar.gz')
# remote.execute('tar xzf rhel-rpms.tar.gz -C /')
self.update_redhat_credentials()
self.assert_release_state(OPENSTACK_RELEASE_REDHAT,
state='available')
self.environment().suspend(verbose=False)
self.environment().snapshot(READY_SNAPSHOT)
self.environment().resume(verbose=False)
@logwrap
def update_redhat_credentials(
self, license_type=REDHAT_LICENSE_TYPE,
username=REDHAT_USERNAME, password=REDHAT_PASSWORD,
satellite_host=REDHAT_SATELLITE_HOST,
activation_key=REDHAT_ACTIVATION_KEY):
# release name is in environment variable OPENSTACK_RELEASE
release_id = self.client.get_release_id('RHOS')
self.client.update_redhat_setup({
"release_id": release_id,
"username": username,
"license_type": license_type,
"satellite": satellite_host,
"password": password,
"activation_key": activation_key})
tasks = self.client.get_tasks()
# wait for the 'redhat_setup' task only; the front-end works the same way
for task in tasks:
if task['name'] == 'redhat_setup' \
and task['result']['release_info']['release_id'] \
== release_id:
return self._task_wait(task, 60 * 120)
def assert_release_state(self, release_name, state='available'):
for release in self.client.get_releases():
if release["name"].find(release_name) != -1:
self.assertEqual(release['state'], state)
return release["id"]

fuelweb_test/integration/base_test_case.py

@@ -1,49 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from devops.helpers.helpers import SSHClient
import logging
from unittest.case import TestCase
from fuelweb_test.integration.ci_fuel_web import CiFuelWeb
logging.basicConfig(format=':%(lineno)d: %(asctime)s %(message)s',
level=logging.DEBUG)
class BaseTestCase(TestCase):
def ci(self):
if not hasattr(self, '_ci'):
self._ci = CiFuelWeb()
return self._ci
def environment(self):
return self.ci().environment()
def nodes(self):
return self.ci().nodes()
def remote(self):
"""
:rtype : SSHClient
"""
return self.nodes().admin.remote(
'internal',
login='root',
password='r00tme')
def get_admin_node_ip(self):
return str(
self.nodes().admin.get_ip_address_by_network_name('internal'))

fuelweb_test/integration/ci_base.py

@@ -1,203 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import abstractproperty, abstractmethod
from devops.helpers.helpers import _get_file_size
from devops.manager import Manager
from ipaddr import IPNetwork
import hashlib
from fuelweb_test.node_roles import Nodes
from fuelweb_test.settings import EMPTY_SNAPSHOT, ISO_PATH
class CiBase(object):
def __init__(self):
self.manager = Manager()
self._environment = None
self.saved_environment_states = {}
def _get_or_create(self):
try:
return self.manager.environment_get(self.env_name())
except:
self._environment = self.describe_environment()
self._environment.define()
return self._environment
def get_state(self, name):
if self.environment().has_snapshot(name):
self.environment().revert(name)
return True
return False
def environment(self):
"""
:rtype : devops.models.Environment
"""
self._environment = self._environment or self._get_or_create()
return self._environment
@abstractproperty
def env_name(self):
"""
:rtype : string
"""
pass
@abstractmethod
def describe_environment(self):
"""
:rtype : devops.models.Environment
"""
pass
@abstractproperty
def node_roles(self):
"""
:rtype : NodeRoles
"""
pass
def nodes(self):
return Nodes(self.environment(), self.node_roles())
# noinspection PyShadowingBuiltins
def add_empty_volume(self, node, name, capacity=20 * 1024 * 1024 * 1024,
device='disk', bus='virtio', format='qcow2'):
self.manager.node_attach_volume(
node=node,
volume=self.manager.volume_create(
name=name, capacity=capacity,
environment=self.environment(),
format=format),
device=device, bus=bus)
def add_node(self, memory, name, boot=None):
return self.manager.node_create(
name=name,
memory=memory,
environment=self.environment(),
boot=boot)
def create_interfaces(self, networks, node):
for network in networks:
if network.name == 'internal':
self.manager.interface_create(network, node=node)
self.manager.interface_create(network, node=node)
self.manager.interface_create(network, node=node)
def describe_admin_node(self, name, networks, memory=1024):
node = self.add_node(memory=memory, name=name, boot=['hd', 'cdrom'])
self.create_interfaces(networks, node)
self.add_empty_volume(node, name + '-system')
self.add_empty_volume(
node, name + '-iso', capacity=_get_file_size(ISO_PATH),
format='raw', device='cdrom', bus='ide')
return node
def describe_empty_node(self, name, networks, memory=1024):
node = self.add_node(memory, name)
self.create_interfaces(networks, node)
self.add_empty_volume(node, name + '-system')
self.add_empty_volume(node, name + '-cinder')
self.add_empty_volume(node, name + '-swift')
return node
@abstractmethod
def setup_environment(self):
"""
:rtype : None
"""
pass
def get_empty_environment(self):
if not(self.get_state(EMPTY_SNAPSHOT)):
self.setup_environment()
self.environment().snapshot(EMPTY_SNAPSHOT)
def generate_state_hash(self, settings):
return hashlib.md5(str(settings)).hexdigest()
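# The settings dict is hashed via its str() representation, so equal dicts
# map to the same snapshot: snapshot_state(name, settings) below saves a
# snapshot named '<name[:17]>_<md5 digest>', and revert_to_state(settings)
# finds it again through the same digest.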
def revert_to_state(self, settings={}):
state_hash = self.generate_state_hash(settings)
if state_hash in self.saved_environment_states:
# revert to matching state
state = self.saved_environment_states[state_hash]
if not(self.get_state(state['snapshot_name'])):
return False
self.environment().resume()
return True
return False
def snapshot_state(self, name, settings={}):
state_hash = self.generate_state_hash(settings)
snapshot_name = '{0}_{1}'.format(
name.replace(' ', '_')[:17], state_hash)
self.environment().suspend(verbose=False)
self.environment().snapshot(
name=snapshot_name,
description=name,
force=True,
)
self.environment().resume(verbose=False)
self.saved_environment_states[state_hash] = {
'snapshot_name': snapshot_name,
'cluster_name': name,
'settings': settings
}
def internal_virtual_ip(self):
return str(IPNetwork(
self.environment().network_by_name('internal').ip_network)[-2])
def public_router(self):
return str(
IPNetwork(
self.environment().network_by_name('public').ip_network)[1])
def internal_router(self):
return self._router('internal')
def nat_router(self):
return self._router('nat')
def _router(self, router_name):
return str(
IPNetwork(
self.environment().network_by_name(router_name).ip_network)[1])
def get_host_node_ip(self):
return self.internal_router()
def internal_network(self):
return str(
IPNetwork(
self.environment().network_by_name('internal').ip_network))
def internal_net_mask(self):
return str(IPNetwork(
self.environment().network_by_name('internal').ip_network).netmask)
def public_net_mask(self):
return str(IPNetwork(
self.environment().network_by_name('public').ip_network).netmask)
def public_network(self):
return str(
IPNetwork(self.environment().network_by_name('public').ip_network))

fuelweb_test/integration/ci_fuel_web.py

@@ -1,138 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
import logging
from devops.helpers.helpers import wait
from ipaddr import IPNetwork
from fuelweb_test.integration.ci_base import CiBase
from fuelweb_test.node_roles import NodeRoles
from fuelweb_test.settings import INTERFACE_ORDER, POOLS, EMPTY_SNAPSHOT,\
ISO_PATH, FORWARDING, DHCP
logger = logging.getLogger('integration')
class CiFuelWeb(CiBase):
hostname = 'nailgun'
domain = 'mirantis.com'
installation_timeout = 1800
deployment_timeout = 1800
puppet_timeout = 1000
def node_roles(self):
return NodeRoles(
admin_names=['admin'],
other_names=['slave-%02d' % x for x in range(1, 10)]
)
def env_name(self):
return os.environ.get('ENV_NAME', 'fuelweb')
def describe_environment(self):
"""
:rtype : Environment
"""
environment = self.manager.environment_create(self.env_name())
networks = []
for name in INTERFACE_ORDER:
ip_networks = [IPNetwork(x) for x in POOLS.get(name)[0].split(',')]
new_prefix = int(POOLS.get(name)[1])
pool = self.manager.create_network_pool(
networks=ip_networks, prefix=int(new_prefix))
networks.append(self.manager.network_create(
name=name, environment=environment, pool=pool,
forward=FORWARDING.get(name), has_dhcp_server=DHCP.get(name)))
for name in self.node_roles().admin_names:
self.describe_admin_node(name, networks)
for name in self.node_roles().other_names:
self.describe_empty_node(name, networks, memory=1024)
return environment
def wait_bootstrap(self):
logging.info("Waiting while bootstrapping is in progress")
log_path = "/var/log/puppet/bootstrap_admin_node.log"
wait(
lambda: not
self.nodes().admin.remote('internal', 'root', 'r00tme').execute(
"grep 'Finished catalog run' '%s'" % log_path
)['exit_code'],
timeout=self.puppet_timeout
)
def get_keys(self, node):
params = {
'ip': node.get_ip_address_by_network_name('internal'),
'mask': self.internal_net_mask(),
'gw': self.nat_router(),
'hostname': '.'.join((self.hostname, self.domain))
}
keys = (
"<Esc><Enter>\n"
"<Wait>\n"
"vmlinuz initrd=initrd.img ks=cdrom:/ks.cfg\n"
" ip=%(ip)s\n"
" netmask=%(mask)s\n"
" gw=%(gw)s\n"
" dns1=%(gw)s\n"
" hostname=%(hostname)s\n"
" <Enter>\n"
) % params
return keys
def enable_nat_for_admin_node(self):
remote = self.nodes().admin.remote('internal', 'root', 'r00tme')
nat_interface_id = 5
file_name = \
'/etc/sysconfig/network-scripts/ifcfg-eth%s' % nat_interface_id
hwaddr = \
''.join(remote.execute('grep HWADDR %s' % file_name)['stdout'])
uuid = ''.join(remote.execute('grep UUID %s' % file_name)['stdout'])
nameserver = os.popen(
"grep '^nameserver' /etc/resolv.conf | "
"grep -v 'nameserver\s\s*127.' | head -3").read()
remote.execute('echo -e "%s'
'%s'
'DEVICE=eth%s\\n'
'TYPE=Ethernet\\n'
'ONBOOT=yes\\n'
'NM_CONTROLLED=no\\n'
'BOOTPROTO=dhcp\\n'
'PEERDNS=no" > %s'
% (hwaddr, uuid, nat_interface_id, file_name))
remote.execute(
'sed "s/GATEWAY=.*/GATEWAY="%s"/g" -i /etc/sysconfig/network'
% self.nat_router())
remote.execute('echo -e "%s" > /etc/dnsmasq.upstream' % nameserver)
remote.execute('service network restart >/dev/null 2>&1')
remote.execute('service dnsmasq restart >/dev/null 2>&1')
def setup_environment(self):
# start admin node
admin = self.nodes().admin
admin.disk_devices.get(device='cdrom').volume.upload(ISO_PATH)
self.environment().start(self.nodes().admins)
# update network parameters at boot screen
time.sleep(20)
admin.send_keys(self.get_keys(admin))
# wait until installation completes
admin.await('internal', timeout=10 * 60)
self.wait_bootstrap()
time.sleep(10)
self.enable_nat_for_admin_node()

fuelweb_test/integration/decorators.py

@@ -1,93 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import json
import logging
import os
import time
import urllib2
from fuelweb_test.settings import LOGS_DIR
def save_logs(ip, filename):
logging.info('Saving logs to file "%s"' % filename)
with open(filename, 'w') as f:
f.write(
urllib2.urlopen("http://%s:8000/api/logs/package" % ip).read()
)
def fetch_logs(func):
""" Decorator to fetch logs to file.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
# noinspection PyBroadException
try:
return func(*args, **kwargs)
finally:
if LOGS_DIR:
if not os.path.exists(LOGS_DIR):
os.makedirs(LOGS_DIR)
save_logs(
args[0].get_admin_node_ip(),
os.path.join(LOGS_DIR, '%s-%d.tar.gz' % (
func.__name__,
time.time())))
return wrapper
def snapshot_errors(func):
""" Decorator to snapshot environment when error occurred in test.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
name = 'error-%s' % func.__name__
description = "Failed in method '%s'" % func.__name__
logging.debug("Snapshot %s %s" % (name, description))
if args[0].ci() is not None:
args[0].ci().environment().suspend(verbose=False)
args[0].ci().environment().snapshot(
name=name[-50:],
description=description,
force=True,
)
raise
return wrapper
def debug(logger):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
logger.debug(
"Calling: %s with args: %s %s" % (func.__name__, args, kwargs))
result = func(*args, **kwargs)
logger.debug("Done: %s with result: %s" % (func.__name__, result))
return result
return wrapped
return wrapper
def json_parse(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
response = func(*args, **kwargs)
return json.loads(response.read())
return wrapped
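A minimal usage sketch of how these decorators compose (the http_client
argument is hypothetical; any object whose get() returns a file-like
response will do):
import logging
logger = logging.getLogger(__name__)
logwrap = debug(logger)
@logwrap
@json_parse
def list_nodes(http_client):
    # json_parse (inner) turns the raw response into parsed JSON;
    # debug (outer) logs the call arguments and the parsed result.
    return http_client.get("/api/nodes/")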

View File

@ -1,77 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import unittest
import xmlrpclib
from devops.helpers.helpers import wait, tcp_ping, http
from nose.plugins.attrib import attr
from fuelweb_test.integration.base_test_case import BaseTestCase
from fuelweb_test.integration.decorators import debug, fetch_logs
from fuelweb_test.settings import CLEAN
logger = logging.getLogger(__name__)
logwrap = debug(logger)
class TestAdminNode(BaseTestCase):
def setUp(self):
if CLEAN:
self.ci().get_empty_environment()
@logwrap
@attr(test_thread='thread_1')
def test_puppetmaster_alive(self):
wait(
lambda: tcp_ping(self.get_admin_node_ip(), 8140),
timeout=5
)
ps_output = self.remote().execute('ps ax')['stdout']
pm_processes = filter(
lambda x: '/usr/sbin/puppetmasterd' in x,
ps_output
)
logging.debug("Found puppet master processes: %s" % pm_processes)
self.assertEquals(len(pm_processes), 4)
@logwrap
@attr(test_thread='thread_1')
def test_cobbler_alive(self):
wait(
lambda: http(host=self.get_admin_node_ip(), url='/cobbler_api',
waited_code=502),
timeout=60
)
server = xmlrpclib.Server(
'http://%s/cobbler_api' % self.get_admin_node_ip())
# raises an error if something isn't right
server.login('cobbler', 'cobbler')
@logwrap
@fetch_logs
@attr(test_thread='thread_1')
def test_nailyd_alive(self):
ps_output = self.remote().execute('ps ax')['stdout']
naily_master = filter(lambda x: 'naily master' in x, ps_output)
logging.debug("Found naily processes: %s" % naily_master)
self.assertEquals(len(naily_master), 1)
naily_workers = filter(lambda x: 'naily worker' in x, ps_output)
logging.debug(
"Found %d naily worker processes: %s" %
(len(naily_workers), naily_workers))
self.assertTrue(len(naily_workers) > 1)
if __name__ == '__main__':
unittest.main()

View File

@ -1,463 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import unittest
from devops.helpers.helpers import wait
from nose.plugins.attrib import attr
from fuelweb_test.helpers import Ebtables
from fuelweb_test.integration.base_node_test_case import BaseNodeTestCase
from fuelweb_test.integration.decorators import snapshot_errors, \
debug, fetch_logs
from fuelweb_test.settings import EMPTY_SNAPSHOT
logging.basicConfig(
format=':%(lineno)d: %(asctime)s %(message)s',
level=logging.DEBUG
)
logger = logging.getLogger(__name__)
logwrap = debug(logger)
class TestNode(BaseNodeTestCase):
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_release_upload(self):
self.prepare_environment()
self._upload_sample_release()
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_http_returns_no_error(self):
self.prepare_environment()
self.client.get_root()
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_create_empty_cluster(self):
self.prepare_environment()
self.create_cluster(name='empty')
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_node_deploy(self):
self.prepare_environment()
self.bootstrap_nodes(self.nodes().slaves[:1])
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_updating_nodes_in_cluster(self):
self.prepare_environment()
cluster_id = self.create_cluster(name='empty')
nodes = {'slave-01': ['controller']}
self.bootstrap_nodes(self.nodes().slaves[:1])
self.update_nodes(cluster_id, nodes)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_one_node_provisioning(self):
self.prepare_environment()
cluster_id = self.create_cluster(name="provision")
self.basic_provisioning(
cluster_id=cluster_id,
nodes_dict={'slave-01': ['controller']}
)
self.run_OSTF(cluster_id=cluster_id, should_fail=12, should_pass=12)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_simple_cluster_flat(self):
cluster_id = self.prepare_environment(settings={
'nodes': {
'slave-01': ['controller'],
'slave-02': ['compute']
}
})
self.assertClusterReady(
'slave-01', smiles_count=6, networks_count=1, timeout=300)
self.get_ebtables(cluster_id, self.nodes().slaves[:2]).restore_vlans()
task = self._run_network_verify(cluster_id)
self.assertTaskSuccess(task, 60 * 2)
self.run_OSTF(cluster_id=cluster_id, should_fail=5, should_pass=19)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_simple_cluster_vlan(self):
self.prepare_environment()
cluster_name = 'simple_vlan'
nodes = {'slave-01': ['controller'], 'slave-02': ['compute']}
cluster_id = self.create_cluster(name=cluster_name)
self.update_vlan_network_fixed(cluster_id, amount=8, network_size=32)
self.basic_provisioning(cluster_id, nodes)
self.assertClusterReady(
'slave-01', smiles_count=6, networks_count=8, timeout=300)
self.get_ebtables(cluster_id, self.nodes().slaves[:2]).restore_vlans()
task = self._run_network_verify(cluster_id)
self.assertTaskSuccess(task, 60 * 2)
self.run_OSTF(cluster_id=cluster_id, should_fail=5, should_pass=19)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_network_config(self):
cluster_id = self.prepare_environment(settings={
'nodes': {
'slave-01': ['controller'],
'slave-02': ['compute']
}
})
slave = self.nodes().slaves[0]
node = self.get_node_by_devops_node(slave)
self.assertNetworkConfiguration(node)
self.run_OSTF(cluster_id=cluster_id, should_fail=5, should_pass=19)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_node_deletion(self):
cluster_id = self.prepare_environment(settings={
'nodes': {
'slave-01': ['controller'],
'slave-02': ['compute']
}
})
nailgun_nodes = self.update_nodes(
cluster_id, {'slave-01': ['controller']}, False, True)
task = self.deploy_cluster(cluster_id)
self.assertTaskSuccess(task)
nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
self.assertTrue(
len(nodes) == 1,
"Verify 1 node has pending deletion status"
)
wait(lambda: self.is_node_discovered(
nodes[0]),
timeout=3 * 60
)
self.run_OSTF(cluster_id=cluster_id, should_fail=23, should_pass=1)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_network_verify_with_blocked_vlan(self):
self.prepare_environment()
cluster_name = 'net_verify'
nodes_dict = {'slave-01': ['controller'], 'slave-02': ['compute']}
cluster_id = self.create_cluster(name=cluster_name)
devops_nodes = self.nodes().slaves[:2]
self.bootstrap_nodes(devops_nodes)
ebtables = self.get_ebtables(cluster_id, devops_nodes)
ebtables.restore_vlans()
self.update_nodes(cluster_id, nodes_dict)
try:
ebtables.block_first_vlan()
task = self._run_network_verify(cluster_id)
self.assertTaskFailed(task, 60 * 2)
finally:
ebtables.restore_first_vlan()
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_multinic_bootstrap_booting(self):
self.prepare_environment()
slave = self.nodes().slaves[0]
mac_addresses = [interface.mac_address for interface in
slave.interfaces.filter(network__name='internal')]
try:
for mac in mac_addresses:
Ebtables.block_mac(mac)
for mac in mac_addresses:
Ebtables.restore_mac(mac)
slave.destroy(verbose=False)
self.nodes().admins[0].revert(EMPTY_SNAPSHOT)
nailgun_slave = self.bootstrap_nodes([slave])[0]
self.assertEqual(mac.upper(), nailgun_slave['mac'].upper())
Ebtables.block_mac(mac)
finally:
for mac in mac_addresses:
Ebtables.restore_mac(mac)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_simple_cluster_with_cinder(self):
cluster_id = self.prepare_environment(settings={
'nodes': {
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder']
}
})
self.assertClusterReady(
'slave-01', smiles_count=6, networks_count=1, timeout=300)
self.run_OSTF(cluster_id=cluster_id, should_fail=5, should_pass=19)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_add_compute_node(self):
cluster_id = self.prepare_environment(settings={
'nodes': {
'slave-01': ['controller'],
'slave-02': ['compute']
}
})
self.bootstrap_nodes(self.nodes().slaves[2:3])
self.update_nodes(cluster_id, {'slave-03': ['compute']}, True, False)
task = self.client.deploy_cluster_changes(cluster_id)
self.assertTaskSuccess(task)
self.assertEqual(3, len(self.client.list_cluster_nodes(cluster_id)))
self.assertClusterReady(
self.nodes().slaves[0].name,
smiles_count=8, networks_count=1, timeout=300)
self.assert_node_service_list(self.nodes().slaves[1].name, 8)
self.assert_node_service_list(self.nodes().slaves[2].name, 8)
self.run_OSTF(cluster_id=cluster_id, should_fail=5, should_pass=19)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_floating_ips(self):
self.prepare_environment()
cluster_name = 'floating_ips'
nodes_dict = {'slave-01': ['controller'], 'slave-02': ['compute']}
self.bootstrap_nodes(self.nodes().slaves[:2])
cluster_id = self.create_cluster(name=cluster_name)
# set ip ranges for floating network
networks = self.client.get_networks(cluster_id)
for interface, network in enumerate(networks['networks']):
if network['name'] == 'floating':
networks['networks'][interface]['ip_ranges'] = [
['240.0.0.2', '240.0.0.10'],
['240.0.0.20', '240.0.0.25'],
['240.0.0.30', '240.0.0.35']]
break
self.client.update_network(cluster_id,
net_manager=networks['net_manager'],
networks=networks['networks'])
# add nodes to the cluster
self.update_nodes(cluster_id, nodes_dict, True, False)
task = self.deploy_cluster(cluster_id)
self.assertTaskSuccess(task)
# assert ips
expected_ips = ['240.0.0.%s' % i for i in range(2, 11, 1)] + \
['240.0.0.%s' % i for i in range(20, 26, 1)] + \
['240.0.0.%s' % i for i in range(30, 36, 1)]
self.assert_cluster_floating_list('slave-02', expected_ips)
self.run_OSTF(cluster_id=cluster_id, should_fail=5, should_pass=19)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_node_disk_sizes(self):
self.prepare_environment()
# all nodes have 3 identical disks of the same size
nodes_dict = {
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['cinder']
}
self.bootstrap_nodes(self.nodes().slaves[:3])
# assert /api/nodes
nailgun_nodes = self.client.list_nodes()
for node in nailgun_nodes:
for disk in node['meta']['disks']:
self.assertEqual(disk['size'], 21474836480, 'Disk size')
notifications = self.client.get_notifications()
for node in nailgun_nodes:
# assert /api/notifications
for notification in notifications:
if notification['node_id'] == node['id']:
self.assertIn('64.0 GB HDD', notification['message'])
# assert disks
disks = self.client.get_node_disks(node['id'])
for disk in disks:
self.assertEqual(disk['size'], 19980, 'Disk size')
# deploy the cluster
self.prepare_environment(settings={'nodes': nodes_dict})
# assert node disks after deployment
for node_name in nodes_dict:
str_block_devices = self.get_cluster_block_devices(node_name)
self.assertRegexpMatches(
str_block_devices,
'vda\s+252:0\s+0\s+20G\s+0\s+disk'
)
self.assertRegexpMatches(
str_block_devices,
'vdb\s+252:16\s+0\s+20G\s+0\s+disk'
)
self.assertRegexpMatches(
str_block_devices,
'vdc\s+252:32\s+0\s+20G\s+0\s+disk'
)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_node_multiple_interfaces(self):
self.prepare_environment()
cluster_name = 'node interfaces'
interfaces_dict = {
'eth0': ['management'],
'eth1': ['floating', 'public'],
'eth2': ['storage'],
'eth3': ['fixed']
}
nodes_dict = {
'slave-01': ['controller'],
'slave-02': ['compute']
}
self.bootstrap_nodes(self.nodes().slaves[:2])
cluster_id = self.create_cluster(name=cluster_name)
self.update_nodes(cluster_id, nodes_dict, True, False)
nailgun_nodes = self.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
self.update_node_networks(node['id'], interfaces_dict)
task = self.deploy_cluster(cluster_id)
self.assertTaskSuccess(task)
nailgun_nodes = self.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
self.assertNetworkConfiguration(node)
task = self._run_network_verify(cluster_id)
self.assertTaskSuccess(task, 60 * 2)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_untagged_network(self):
cluster_name = 'simple_untagged'
vlan_turn_off = {'vlan_start': None}
nodes = {
'slave-01': ['controller'],
'slave-02': ['compute']
}
interfaces = {
'eth0': ["storage"],
'eth1': ["public", "floating"],
'eth2': ["management"],
'eth3': ["fixed"]
}
self.prepare_environment()
# create a new empty cluster and add nodes to it:
cluster_id = self.create_cluster(name=cluster_name)
self.bootstrap_nodes(self.nodes().slaves[:2])
self.update_nodes(cluster_id, nodes)
# assign all networks to second network interface:
nets = self.client.get_networks(cluster_id)['networks']
nailgun_nodes = self.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
self.update_node_networks(node['id'], interfaces)
# select networks that will be untagged:
[net.update(vlan_turn_off) for net in nets if net["name"] != "storage"]
# stop using VLANs:
self.client.update_network(cluster_id,
networks=nets)
# run network check:
task = self._run_network_verify(cluster_id)
self.assertTaskSuccess(task, 60 * 5)
# deploy cluster:
task = self.deploy_cluster(cluster_id)
self.assertTaskSuccess(task)
# run network check again:
task = self._run_network_verify(cluster_id)
self.assertTaskSuccess(task, 60 * 5)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_multirole_controller_cinder(self):
cluster_id = self.prepare_environment(settings={
'nodes': {
'slave-01': ['controller', 'cinder'],
'slave-02': ['compute']
}
})
task = self._run_network_verify(cluster_id)
self.assertTaskSuccess(task, 60 * 2)
self.run_OSTF(cluster_id=cluster_id, should_fail=4, should_pass=20)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_multirole_compute_cinder(self):
cluster_id = self.prepare_environment(settings={
'nodes': {
'slave-01': ['controller'],
'slave-02': ['compute', 'cinder']
}
})
task = self._run_network_verify(cluster_id)
self.assertTaskSuccess(task, 60 * 2)
self.run_OSTF(cluster_id=cluster_id, should_fail=4, should_pass=20)
if __name__ == '__main__':
unittest.main()

View File

@ -1,58 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import unittest
from nose.plugins.attrib import attr
from fuelweb_test.integration.base_node_test_case import BaseNodeTestCase
from fuelweb_test.integration.decorators import snapshot_errors, \
debug, fetch_logs
logging.basicConfig(
format=':%(lineno)d: %(asctime)s %(message)s',
level=logging.DEBUG
)
logger = logging.getLogger(__name__)
logwrap = debug(logger)
class TestNode(BaseNodeTestCase):
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_3')
def test_ha_cluster_vlan(self):
self.prepare_environment()
cluster_name = 'ha_vlan'
nodes_dict = {
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
cluster_id = self.create_cluster(name=cluster_name, mode="ha_compact")
self.update_vlan_network_fixed(cluster_id, amount=8, network_size=32)
self.basic_provisioning(cluster_id, nodes_dict)
self.assertClusterReady(
'slave-01', smiles_count=16, networks_count=8, timeout=300)
self.get_ebtables(cluster_id, self.nodes().slaves[:5]).restore_vlans()
task = self._run_network_verify(cluster_id)
self.assertTaskSuccess(task, 60 * 2)
self.run_OSTF(cluster_id=cluster_id, should_fail=6, should_pass=18)
if __name__ == '__main__':
unittest.main()

View File

@ -1,87 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import unittest
from nose.plugins.attrib import attr
from fuelweb_test.integration.base_node_test_case import BaseNodeTestCase
from fuelweb_test.integration.decorators import snapshot_errors, \
debug, fetch_logs
logging.basicConfig(
format=':%(lineno)d: %(asctime)s %(message)s',
level=logging.DEBUG
)
logger = logging.getLogger(__name__)
logwrap = debug(logger)
class TestNode(BaseNodeTestCase):
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_4')
def test_ha_cluster_flat(self):
cluster_id = self.prepare_environment(
name="ha_flat",
mode="ha_compact",
settings={
'nodes': {
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
}
)
self.assertClusterReady(
'slave-01', smiles_count=16, networks_count=1, timeout=300)
self.get_ebtables(cluster_id, self.nodes().slaves[:5]).restore_vlans()
task = self._run_network_verify(cluster_id)
self.assertTaskSuccess(task, 60 * 2)
self.run_OSTF(cluster_id=cluster_id, should_fail=6, should_pass=18)
@snapshot_errors
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_4')
def test_ha_add_compute(self):
cluster_id = self.prepare_environment(
name="ha_flat",
mode="ha_compact",
settings={
'nodes': {
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute']
}
}
)
self.bootstrap_nodes(self.nodes().slaves[5:6])
self.update_nodes(cluster_id, {'slave-06': ['compute']}, True, False)
task = self.client.deploy_cluster_changes(cluster_id)
self.assertTaskSuccess(task)
self.assertEqual(6, len(self.client.list_cluster_nodes(cluster_id)))
self.run_OSTF(cluster_id=cluster_id, should_fail=6, should_pass=18)
if __name__ == '__main__':
unittest.main()

View File

@ -1,85 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import unittest
from devops.helpers.helpers import wait
from nose.plugins.attrib import attr
from fuelweb_test.helpers import Ebtables
from fuelweb_test.integration.base_node_test_case import BaseNodeTestCase
from fuelweb_test.integration.decorators import snapshot_errors, \
debug, fetch_logs
from fuelweb_test.settings import EMPTY_SNAPSHOT
logging.basicConfig(
format=':%(lineno)d: %(asctime)s %(message)s',
level=logging.DEBUG
)
logger = logging.getLogger(__name__)
logwrap = debug(logger)
class TestNodeNegative(BaseNodeTestCase):
@logwrap
@fetch_logs
@attr(releases=['centos', 'redhat'], test_thread='thread_2')
def test_untagged_networks_negative(self):
cluster_name = 'simple_untagged'
vlan_turn_off = {'vlan_start': None}
nodes = {
'slave-01': ['controller'],
'slave-02': ['compute']
}
interfaces = {
'eth0': ["fixed"],
'eth1': ["public", "floating"],
'eth2': ["management", "storage"],
'eth3': []
}
self.prepare_environment()
# create a new empty cluster and add nodes to it:
cluster_id = self.create_cluster(name=cluster_name)
self.bootstrap_nodes(self.nodes().slaves[:2])
self.update_nodes(cluster_id, nodes)
# assign all networks to second network interface:
nets = self.client.get_networks(cluster_id)['networks']
nailgun_nodes = self.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
self.update_node_networks(node['id'], interfaces)
# select networks that will be untagged:
[net.update(vlan_turn_off) for net in nets]
# stop using VLANs:
self.client.update_network(cluster_id,
networks=nets)
# run network check:
task = self._run_network_verify(cluster_id)
self.assertTaskFailed(task, 60 * 5)
# deploy cluster:
task = self.deploy_cluster(cluster_id)
self.assertTaskFailed(task)
if __name__ == '__main__':
unittest.main()

View File

@ -1,10 +0,0 @@
test: test-integration
.PHONY: test-integration
test-integration: $(BUILD_DIR)/iso/iso.done
ENV_NAME=$(ENV_NAME) ISO_PATH=$(abspath $(ISO_PATH)) LOGS_DIR=$(LOGS_DIR) nosetests -l $(LEVEL) $(NOSEARGS) -w $(SOURCE_DIR)/fuelweb_test/integration --with-xunit -s
.PHONY: clean-integration-test
clean-integration-test: /:=$/
clean-integration-test:
dos.py erase $(ENV_NAME) || true

View File

@ -1,241 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuelweb_test.helpers import HTTPClient
from fuelweb_test.integration.decorators import debug, json_parse
from fuelweb_test.settings import OPENSTACK_RELEASE
logger = logging.getLogger(__name__)
logwrap = debug(logger)
class NailgunClient(object):
def __init__(self, ip):
self.client = HTTPClient(url="http://%s:8000" % ip)
super(NailgunClient, self).__init__()
@logwrap
def get_root(self):
return self.client.get("/")
@logwrap
@json_parse
def list_nodes(self):
return self.client.get("/api/nodes/")
@logwrap
@json_parse
def list_cluster_nodes(self, cluster_id):
return self.client.get("/api/nodes/?cluster_id=%s" % cluster_id)
@logwrap
@json_parse
def get_networks(self, cluster_id):
return self.client.get(
"/api/clusters/%d/network_configuration/" % cluster_id)
@logwrap
@json_parse
def verify_networks(self, cluster_id, networks):
return self.client.put(
"/api/clusters/%d/network_configuration/verify/" % cluster_id,
{'networks': networks}
)
@logwrap
@json_parse
def get_cluster_attributes(self, cluster_id):
return self.client.get(
"/api/clusters/%s/attributes/" % cluster_id
)
@logwrap
@json_parse
def update_cluster_attributes(self, cluster_id, attrs):
return self.client.put(
"/api/clusters/%s/attributes/" % cluster_id, attrs
)
@logwrap
@json_parse
def get_cluster(self, cluster_id):
return self.client.get(
"/api/clusters/%s" % cluster_id)
@logwrap
@json_parse
def update_cluster(self, cluster_id, data):
return self.client.put(
"/api/clusters/%s/" % cluster_id,
data
)
@logwrap
@json_parse
def delete_cluster(self, cluster_id):
return self.client.delete(
"/api/clusters/%s/" % cluster_id
)
@logwrap
@json_parse
def update_node(self, node_id, data):
return self.client.put(
"/api/nodes/%s/" % node_id, data
)
@logwrap
@json_parse
def update_nodes(self, data):
return self.client.put(
"/api/nodes", data
)
@logwrap
@json_parse
def deploy_cluster_changes(self, cluster_id):
return self.client.put(
"/api/clusters/%d/changes/" % cluster_id
)
@logwrap
@json_parse
def get_task(self, task_id):
return self.client.get("/api/tasks/%s" % task_id)
@logwrap
@json_parse
def get_tasks(self):
return self.client.get("/api/tasks")
@logwrap
@json_parse
def get_releases(self):
return self.client.get("/api/releases/")
@logwrap
@json_parse
def get_node_disks(self, disk_id):
return self.client.get("/api/nodes/%s/disks" % disk_id)
@logwrap
def get_release_id(self, release_name=OPENSTACK_RELEASE):
for release in self.get_releases():
if release["name"].find(release_name) != -1:
return release["id"]
@logwrap
@json_parse
def get_node_interfaces(self, node_id):
return self.client.get("/api/nodes/%s/interfaces" % node_id)
@logwrap
@json_parse
def put_node_interfaces(self, data):
return self.client.put("/api/nodes/interfaces", data)
@logwrap
@json_parse
def list_clusters(self):
return self.client.get("/api/clusters/")
@logwrap
@json_parse
def create_cluster(self, data):
return self.client.post(
"/api/clusters",
data=data
)
@logwrap
@json_parse
def get_ostf_test_sets(self):
return self.client.get("/ostf/testsets")
@logwrap
@json_parse
def get_ostf_tests(self):
return self.client.get("/ostf/tests")
@logwrap
@json_parse
def get_ostf_test_run(self, cluster_id):
return self.client.get("/ostf/testruns/last/%s" % cluster_id)
@logwrap
@json_parse
def ostf_run_tests(self, cluster_id, test_sets_list):
data = []
for test_set in test_sets_list:
data.append(
{
'metadata': {'cluster_id': cluster_id, 'config': {}},
'testset': test_set
}
)
return self.client.post("/ostf/testruns", data)
@logwrap
@json_parse
def update_network(self, cluster_id, networks=None, net_manager=None):
data = {}
if networks is not None:
data.update({'networks': networks})
if net_manager is not None:
data.update({'net_manager': net_manager})
return self.client.put(
"/api/clusters/%d/network_configuration" % cluster_id, data
)
@logwrap
def get_cluster_id(self, name):
for cluster in self.list_clusters():
if cluster["name"] == name:
return cluster["id"]
@logwrap
def add_syslog_server(self, cluster_id, host, port):
# Here we update the cluster's editable attributes;
# in particular, we set an extra syslog server
attributes = self.get_cluster_attributes(cluster_id)
attributes["editable"]["syslog"]["syslog_server"]["value"] = host
attributes["editable"]["syslog"]["syslog_port"]["value"] = port
self.update_cluster_attributes(cluster_id, attributes)
@logwrap
def clean_clusters(self):
for cluster in self.list_clusters():
self.delete_cluster(cluster["id"])
@logwrap
def _get_cluster_vlans(self, cluster_id):
cluster_vlans = []
for network in self.get_networks(cluster_id)['networks']:
amount = network.get('amount', 1)
cluster_vlans.extend(range(network['vlan_start'],
network['vlan_start'] + amount))
return cluster_vlans
@logwrap
@json_parse
def get_notifications(self):
return self.client.get("/api/notifications")
@logwrap
@json_parse
def update_redhat_setup(self, data):
return self.client.post("/api/redhat/setup", data=data)

View File

@ -1,37 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class NodeRoles(object):
def __init__(self,
admin_names=None,
other_names=None):
self.admin_names = admin_names or []
self.other_names = other_names or []
class Nodes(object):
def __init__(self, environment, node_roles):
self.admins = []
self.others = []
for node_name in node_roles.admin_names:
self.admins.append(environment.node_by_name(node_name))
for node_name in node_roles.other_names:
self.others.append(environment.node_by_name(node_name))
self.slaves = self.others
self.all = self.slaves + self.admins
self.admin = self.admins[0]
def __iter__(self):
return self.all.__iter__()

View File

@ -1,14 +0,0 @@
sudo pip install virtualenv
virtualenv ../fuelweb_test --system-site-packages
. ../fuelweb_test/bin/activate
pip install -r fuelweb_test/requirements.txt
sudo sed -ir 's/(local\s+all\s+postgres\s+)peer/\1trust/' /etc/postgresql/9.1/main/pg_hba.conf
# make sure /etc/postgresql/9.1/main/pg_hba.conf has the following line
# local all postgres trust
sudo service postgresql restart
django-admin.py syncdb --settings devops.settings
export ISO_PATH=`pwd`/build/iso/nailgun-centos-6.4-amd64.iso
export ENV_NAME="fuelweb_test" # Or any other name you need
dos.py erase $ENV_NAME
nosetests -w fuelweb_test

View File

@ -1,4 +0,0 @@
nose==1.2.1
git+ssh://git@github.com/Mirantis/devops.git@fcaf0931dc850e9742b4b6a28681e6cdef7c0f83
anyjson==0.3.1
paramiko==1.10.1

View File

@ -1,22 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
here = lambda *x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
REPOSITORY_ROOT = here('..')
root = lambda *x: os.path.join(os.path.abspath(REPOSITORY_ROOT), *x)
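# For illustration (the repository layout is assumed): with this file inside
# <repo>/fuelweb_test/, here('..') is <repo>, so root('fuelweb_test',
# 'settings.py') resolves to <repo>/fuelweb_test/settings.py.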

View File

@ -1,82 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
ISO_PATH = os.environ.get('ISO_PATH')
EMPTY_SNAPSHOT = os.environ.get('EMPTY_SNAPSHOT', 'empty')
READY_SNAPSHOT = os.environ.get('READY_SNAPSHOT', 'ready')
OPENSTACK_RELEASE_CENTOS = 'Grizzly on CentOS 6.4'
OPENSTACK_RELEASE_REDHAT = 'RHOS 3.0 for RHEL 6.4'
OPENSTACK_RELEASE = os.environ.get(
'OPENSTACK_RELEASE', OPENSTACK_RELEASE_CENTOS)
REDHAT_LICENSE_TYPE = os.environ.get('REDHAT_LICENSE_TYPE')
REDHAT_USERNAME = os.environ.get('REDHAT_USERNAME')
REDHAT_PASSWORD = os.environ.get('REDHAT_PASSWORD')
REDHAT_SATELLITE_HOST = os.environ.get('REDHAT_SATELLITE_HOST')
REDHAT_ACTIVATION_KEY = os.environ.get('REDHAT_ACTIVATION_KEY')
INTERFACE_ORDER = ('internal', 'public', 'private', 'nat')
PUBLIC_FORWARD = os.environ.get('PUBLIC_FORWARD', None)
FORWARDING = {
'public': PUBLIC_FORWARD,
'internal': None,
'private': None,
'nat': 'nat',
}
DHCP = {
'public': False,
'internal': False,
'private': False,
'nat': True,
}
INTERFACES = {
'internal': 'eth0',
'public': 'eth1',
'private': 'eth2',
'nat': 'eth3',
}
DEFAULT_POOLS = {
'public': '10.108.0.0/16:24',
'private': '10.108.0.0/16:24',
'internal': '10.108.0.0/16:24',
'nat': '10.108.0.0/16:24',
}
POOLS = {
'public': os.environ.get('PUBLIC_POOL',
DEFAULT_POOLS.get('public')).split(':'),
'private': os.environ.get('PRIVATE_POOL',
DEFAULT_POOLS.get('private')).split(':'),
'internal': os.environ.get('INTERNAL_POOL',
DEFAULT_POOLS.get('internal')).split(':'),
'nat': os.environ.get('NAT_POOL',
DEFAULT_POOLS.get('nat')).split(':'),
}
NETWORK_MANAGERS = {
'flat': 'FlatDHCPManager',
'vlan': 'VlanManager'
}
CLEAN = os.environ.get('CLEAN', 'true') == 'true'
LOGS_DIR = os.environ.get('LOGS_DIR')
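For reference, each pool value is a "<network>:<prefix>" string split on ':'
(the override address below is an assumption):
# export PUBLIC_POOL='172.18.0.0/16:24'
net_cidr, prefix = POOLS['public']  # ('172.18.0.0/16', '24')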

View File

@ -1 +0,0 @@
naMu7aej

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,4 +0,0 @@
1323560292.885204
6.3
x86_64
1

View File

@ -1,20 +0,0 @@
[general]
family = CentOS
timestamp = 1323560005.81
variant =
totaldiscs = 1
version = 6.3
discnum = 1
packagedir =
arch = x86_64
[images-x86_64]
kernel = images/pxeboot/vmlinuz
initrd = images/pxeboot/initrd.img
[images-xen]
kernel = images/pxeboot/vmlinuz
initrd = images/pxeboot/initrd.img
[stage2]
mainimage = images/install.img

View File

@ -1,17 +0,0 @@
#
# This service starts the bootstrap_admin_node.sh script,
# which launches puppet, but only if the /etc/sysconfig/bootstrap_admin_node file exists.
#
start on stopped start-ttys
task
console output
script
. /etc/sysconfig/bootstrap_admin_node
if test "${ENABLED}" = "1"; then
echo "ENABLED=0" > /etc/sysconfig/bootstrap_admin_node
/usr/local/sbin/bootstrap_admin_node.sh 2>&1 | tee /var/log/puppet/bootstrap_admin_node.log
fi
initctl start tty TTY=/dev/tty1
end script

View File

@ -1,3 +0,0 @@
#!/bin/bash
puppet apply /etc/puppet/modules/nailgun/examples/site.pp

View File

@ -1,22 +0,0 @@
default vesamenu.c32
#prompt 1
timeout 300
display boot.msg
menu background splash.jpg
menu title Welcome to Fuel Installer!
menu color border 0 #ffffffff #00000000
menu color sel 7 #ffffffff #ff000000
menu color title 0 #ffffffff #00000000
menu color tabmsg 0 #ffffffff #00000000
menu color unsel 0 #ffffffff #00000000
menu color hotsel 0 #ff000000 #ffffffff
menu color hotkey 7 #ffffffff #ff000000
menu color scrollbar 0 #ffffffff #00000000
label nailgunstatic
menu label Fuel Install (^Static IP)
menu default
kernel vmlinuz
append initrd=initrd.img biosdevname=0 ks=cdrom:/ks.cfg ip=10.20.0.2 gw=10.20.0.1 dns1=10.20.0.1 netmask=255.255.255.0 hostname=fuel.domain.tld

Binary file not shown.


View File

@ -1,74 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import argparse
import yaml
from jinja2 import FileSystemLoader
from jinja2 import Environment
class KickstartFile(object):
def __init__(self, config_filename, template_filename):
with open(config_filename, "r") as f:
self.config = yaml.load(f.read())
self.env = Environment(
loader=FileSystemLoader(
os.path.dirname(os.path.abspath(template_filename))
)
)
self.template = self.env.get_template(
os.path.basename(os.path.abspath(template_filename))
)
def render(self):
return self.template.render(self.config)
def render2file(self, filename):
with open(filename, "w") as f:
f.write(self.render())
if __name__ == "__main__":
description = """
This script builds a kickstart file using the jinja2 template system.
"""
parser = argparse.ArgumentParser(epilog=description)
parser.add_argument(
'-t', '--template', dest='template', action='store', type=str,
help='kickstart template file', required=True
)
parser.add_argument(
'-c', '--config', dest='config', action='store', type=str,
help='yaml config file', required=True
)
parser.add_argument(
'-o', '--output', dest='output', action='store', type=str,
help='where to output templating result', default='-'
)
params, other_params = parser.parse_known_args()
ks = KickstartFile(
config_filename=params.config,
template_filename=params.template
)
if params.output == '-':
print ks.render()
else:
ks.render2file(params.output)
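A minimal direct-use sketch (file names are assumptions; the build system
invokes this script with -t/-c/-o instead):
ks = KickstartFile(config_filename='iso/ks.yaml',
template_filename='iso/ks.template')
ks.render2file('ks.cfg')  # render the template with the YAML config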

View File

@ -1,315 +0,0 @@
install
text
%include /tmp/source.ks
{{reboot}}
lang en_US.UTF-8
keyboard us
rootpw r00tme
timezone --utc Etc/UTC
firewall --disabled
selinux --disabled
zerombr
%include /tmp/bootloader.ks
%include /tmp/partition.ks
%pre
#!/bin/sh
# hard drives
drives=""
for drv in `ls -1 /sys/block | grep "sd\|hd\|vd\|cciss"`; do
if (grep -q 0 /sys/block/${drv}/removable); then
d=`echo ${drv} | sed -e 's/!/\//'`
drives="${drives} ${d}"
fi
done
set ${drives}
numdrives=`echo $#`
tgtdrive="undefined"
if [ ${numdrives} -gt 1 ]; then
exec < /dev/tty3 > /dev/tty3 2>&1
chvt 3
while [ "${tgtdrive}" = "undefined" ]; do
clear
echo
echo '********************************************************************************'
echo '* W A R N I N G *'
echo '* *'
echo '* Which of the detected hard drives do you want to be used as *'
echo '* installation target? *'
echo '* *'
echo '********************************************************************************'
echo
echo "Possible choices: ${drives}"
echo
read -p "Choose hard drive: " tgtdrive
done
clear
chvt 1
else
tgtdrive=`echo ${drives} | sed -e "s/^\s*//" -e "s/\s*$//"`
fi
# source
if test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
echo "harddrive --partition=UUID=will_be_substituted_with_actual_uuid --dir=/" > /tmp/source.ks
else
echo "cdrom" > /tmp/source.ks
fi
hdparm -z /dev/${tgtdrive}
dd if=/dev/zero of=/dev/${tgtdrive} bs=1M count=10
sleep 5
hdparm -z /dev/${tgtdrive}
parted -s /dev/${tgtdrive} mklabel gpt
parted -a none -s /dev/${tgtdrive} unit MiB mkpart primary 0 24
parted -s /dev/${tgtdrive} set 1 bios_grub on
parted -a none -s /dev/${tgtdrive} unit MiB mkpart primary fat16 24 224
parted -s /dev/${tgtdrive} set 2 boot on
parted -a none -s /dev/${tgtdrive} unit MiB mkpart primary 224 424
sleep 10
hdparm -z /dev/${tgtdrive}
# partition
echo > /tmp/partition.ks
echo "partition /boot --onpart=/dev/${tgtdrive}3" >> /tmp/partition.ks
echo "partition pv.001 --ondisk=${tgtdrive} --size=1 --grow" >> /tmp/partition.ks
echo "volgroup os pv.001" >> /tmp/partition.ks
echo "logvol swap --vgname=os --recommended --name=swap" >> /tmp/partition.ks
echo "logvol / --vgname=os --size=10000 --name=root --fstype=ext4" >> /tmp/partition.ks
echo "logvol /var --vgname=os --size=1 --grow --name=var --fstype=xfs" >> /tmp/partition.ks
# bootloader
echo "bootloader --location=mbr --driveorder=${tgtdrive} --append=' biosdevname=0 crashkernel=none'" > /tmp/bootloader.ks
# Anaconda cannot install grub 0.97 on disks larger than 4T.
# The reason is that grub does not support such large geometries
# and simply thinks that the cylinder number has a negative value.
# Here we set the geometry manually so that grub thinks the disk
# size is 1G.
# 130 cylinders * (16065 * 512 = 8225280 bytes) = 1G
echo "%post --nochroot --log=/mnt/sysimage/root/anaconda-post-partition.log" > /tmp/post_partition.ks
echo "echo \"device (hd0) /dev/${tgtdrive}\" >> /tmp/grub.script" >> /tmp/post_partition.ks
echo "echo \"geometry (hd0) 130 255 63\" >> /tmp/grub.script" >> /tmp/post_partition.ks
echo "echo \"root (hd0,2)\" >> /tmp/grub.script" >> /tmp/post_partition.ks
echo "echo \"install /grub/stage1 (hd0) /grub/stage2 p /grub/grub.conf\" >> /tmp/grub.script" >> /tmp/post_partition.ks
echo "echo quit >> /tmp/grub.script" >> /tmp/post_partition.ks
echo "cat /tmp/grub.script | chroot /mnt/sysimage /sbin/grub --no-floppy --batch" >> /tmp/post_partition.ks
%end
%packages --nobase --excludedocs
@Core
authconfig
bind-utils
cronie
crontabs
curl
man
mlocate
ntp
openssh-clients
policycoreutils
puppet-2.7.19
selinux-policy-targeted
subscription-manager
system-config-firewall-base
tcpdump
vim-enhanced
rubygem-netaddr
rubygem-openstack
wget
yum
%include /tmp/post_partition.ks
# Mount installation media in chroot
%post --nochroot
#!/bin/sh
if [ -d /mnt/source ] ; then
mkdir -p /mnt/sysimage/tmp/source
mount -o bind /mnt/source /mnt/sysimage/tmp/source
fi
%post --log=/root/anaconda-post.log
#!/bin/sh
set -x
function save_cfg {
scrFile="/etc/sysconfig/network-scripts/ifcfg-$device"
sed -i -e 's#^\(HOSTNAME=\).*$#\1'"$hostname"'#' /etc/sysconfig/network
grep -q "^\s*$ip\s+$hostname" /etc/hosts || echo "$ip $hostname" >> /etc/hosts
echo GATEWAY=$gw >> /etc/sysconfig/network
echo "nameserver 127.0.0.1" > /etc/resolv.conf
[ $dns1 ] && echo "nameserver $dns1" > /etc/dnsmasq.upstream
[ $dns2 ] && echo "nameserver $dns2" >> /etc/dnsmasq.upstream
echo DEVICE=$device > $scrFile
echo ONBOOT=yes >> $scrFile
echo NM_CONTROLLED=no >> $scrFile
echo HWADDR=$hwaddr >> $scrFile
echo USERCTL=no >> $scrFile
echo PEERDNS=no >> $scrFile
if [ $ip ]; then
echo BOOTPROTO=static >> $scrFile
echo IPADDR=$ip >> $scrFile
echo NETMASK=$netmask >> $scrFile
else
echo BOOTPROTO=dhcp >> $scrFile
fi
}
# Default FQDN
hostname="nailgun.mirantis.com"
for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
hostname=$hostname
ip=$ip
netmask=$netmask
gw=$gw
device="eth0"
hwaddr=`ifconfig $device | grep -i hwaddr | sed -e 's#^.*hwaddr[[:space:]]*##I'`
save_cfg
# Mounting installation source
SOURCE=/tmp/source
FS=/tmp/fs
echo
mkdir -p ${SOURCE}
mkdir -p ${FS}
if test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
mount /dev/disk/by-uuid/will_be_substituted_with_actual_uuid ${FS}
mount -o loop ${FS}/nailgun.iso ${SOURCE}
fi
# Copying Repo to the nailgun /var/www directory
repodir="/var/www/nailgun"
mkdir -p ${repodir}/centos/fuelweb/x86_64
cp -r ${SOURCE}/images ${repodir}/centos/fuelweb/x86_64
cp -r ${SOURCE}/isolinux ${repodir}/centos/fuelweb/x86_64
cp -r ${SOURCE}/repodata ${repodir}/centos/fuelweb/x86_64
cp -r ${SOURCE}/Packages ${repodir}/centos/fuelweb/x86_64
cp ${SOURCE}/.treeinfo ${repodir}/centos/fuelweb/x86_64
cp -r ${SOURCE}/rhel ${repodir}
# Copying Ubuntu files
mkdir -p ${repodir}/ubuntu/fuelweb/x86_64/images
cp -r ${SOURCE}/ubuntu/conf ${repodir}/ubuntu/fuelweb/x86_64
cp -r ${SOURCE}/ubuntu/db ${repodir}/ubuntu/fuelweb/x86_64
cp -r ${SOURCE}/ubuntu/dists ${repodir}/ubuntu/fuelweb/x86_64
cp -r ${SOURCE}/ubuntu/pool ${repodir}/ubuntu/fuelweb/x86_64
cp -r ${SOURCE}/ubuntu/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux ${repodir}/ubuntu/fuelweb/x86_64/images
cp -r ${SOURCE}/ubuntu/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz ${repodir}/ubuntu/fuelweb/x86_64/images
# Copying eggs/gems to the nailgun directory
cp -r ${SOURCE}/eggs ${repodir}
cp -r ${SOURCE}/gems ${repodir}
# Copying bootstrap image
mkdir -p ${repodir}/bootstrap
cp -r ${SOURCE}/bootstrap/initramfs.img ${repodir}/bootstrap
cp -r ${SOURCE}/bootstrap/linux ${repodir}/bootstrap
mkdir -p /root/.ssh
chmod 700 /root/.ssh
cp ${SOURCE}/bootstrap/bootstrap.rsa /root/.ssh
chmod 600 /root/.ssh/bootstrap.rsa
# Unpacking puppet manifests for master and slave
cp ${SOURCE}/puppet-slave.tgz ${repodir}/
#mkdir -p /opt/nailgun_puppet
#tar zxf ${SOURCE}/puppet-nailgun.tgz -C /opt/nailgun_puppet
tar zxf ${SOURCE}/puppet-slave.tgz -C /etc/puppet/modules
mkdir -p /etc/puppet/manifests/
cp /etc/puppet/modules/osnailyfacter/examples/site.pp /etc/puppet/manifests/site.pp
cp ${SOURCE}/send2syslog.py /bin/send2syslog.py
#ln -s /etc/puppet/modules/mcollective /etc/puppet/modules/nailgun /etc/puppet/modules/osnailyfacter /etc/puppet/modules/stdlib /etc/puppet/modules/rabbitmq /etc/puppet/modules/puppetdb/ /etc/puppet/modules/postgresql/ /etc/puppet/modules/inifile/ /etc/puppet/modules/sysctl/ /opt/nailgun_puppet/
# Prepare local repository specification
rm /etc/yum.repos.d/CentOS*.repo
cat > /etc/yum.repos.d/nailgun.repo << EOF
[nailgun]
name=Nailgun Local Repo
baseurl=file:/var/www/nailgun/centos/fuelweb/x86_64
gpgcheck=0
EOF
# Disable subscription-manager plugins
sed -i 's/^enabled.*/enabled=0/' /etc/yum/pluginconf.d/product-id.conf || :
sed -i 's/^enabled.*/enabled=0/' /etc/yum/pluginconf.d/subscription-manager.conf || :
# Disable GSSAPI in ssh server config
sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
# Copying bootstrap_admin_node.sh, chmod it and
# adding /etc/init/bootstrap_admin_node.conf
cp ${SOURCE}/bootstrap_admin_node.sh /usr/local/sbin/bootstrap_admin_node.sh
chmod 0777 /usr/local/sbin/bootstrap_admin_node.sh
cp ${SOURCE}/bootstrap_admin_node.conf /etc/init/bootstrap_admin_node.conf
echo "ENABLED=1" > /etc/sysconfig/bootstrap_admin_node
# Copying the version.yaml file. It contains the COMMIT_SHA of the last commit.
mkdir -p /etc/nailgun
cp ${SOURCE}/version.yaml /etc/nailgun/version.yaml
# Generate Fuel UUID
uuidgen > /etc/fuel-uuid
# Prepare custom /etc/issue logon banner and script for changing IP in it
cat > /etc/issue << EOF
#########################################
# Welcome to the Fuel server #
#########################################
Server is running on \m platform
Fuel Web UI is available on: http://:8000
Default administrator login: root
Default administrator password: r00tme
Please change root password on first login.
EOF
echo "sed -i \"s%\(^.*able on:\).*$%\1 http://\`ip address show eth0 | awk '/inet / {print \$2}' | cut -d/ -f1 -\`:8000%\" /etc/issue" >>/etc/rc.local
# Unmounting source
umount -f ${SOURCE}
rm -rf ${SOURCE}
umount -f ${FS} || true
rm -rf ${FS}
# Enabling/configuring NTPD and ntpdate services
echo "server 127.127.1.0" >> /etc/ntp.conf
echo "fudge 127.127.1.0 stratum 10" >> /etc/ntp.conf
echo "tos orphan 7" >> /etc/ntp.conf
chkconfig ntpd on
chkconfig ntpdate on
# Do not show an error message on ntpdate failure. Customers should not be
# confused if the admin node has no access to internet time servers.
sed -i /etc/rc.d/init.d/ntpdate -e 's/\([ $RETVAL -eq 0 ] && success || \)failure/\1success/'
# Disabling splash
sed -i --follow-symlinks -e '/^\skernel/ s/rhgb//' /etc/grub.conf
sed -i --follow-symlinks -e '/^\skernel/ s/quiet//' /etc/grub.conf
# Disabling console clearing
sed -i 's/getty/getty --noclear/' /etc/init/tty.conf
# Disabling starting first console from start-ttys service
sed -i --follow-symlinks -e 's/ACTIVE_CONSOLES=.*/ACTIVE_CONSOLES=\/dev\/tty\[2-6\]/' /etc/sysconfig/init
# Copying default bash settings to the root directory
cp -f /etc/skel/.bash* /root/
%end

View File

@ -1 +0,0 @@
reboot: reboot --eject

View File

@ -1,210 +0,0 @@
.PHONY: iso img
all: iso img
ISOROOT:=$(BUILD_DIR)/iso/isoroot
iso: $(BUILD_DIR)/iso/iso.done
img: $(BUILD_DIR)/iso/img.done
$(BUILD_DIR)/iso/isoroot-centos.done: \
$(BUILD_DIR)/mirror/build.done \
$(BUILD_DIR)/packages/build.done \
$(BUILD_DIR)/iso/isoroot-dotfiles.done
mkdir -p $(ISOROOT)
rsync -rp $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/ $(ISOROOT)
createrepo -g $(ISOROOT)/comps.xml -x 'rhel/*' \
-u media://`head -1 $(ISOROOT)/.discinfo` $(ISOROOT)
$(ACTION.TOUCH)
$(BUILD_DIR)/iso/isoroot-ubuntu.done: \
$(BUILD_DIR)/mirror/build.done \
$(BUILD_DIR)/packages/build.done \
$(BUILD_DIR)/iso/isoroot-dotfiles.done
mkdir -p $(ISOROOT)/ubuntu
rsync -rp $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/ $(ISOROOT)/ubuntu/
$(ACTION.TOUCH)
$(BUILD_DIR)/iso/isoroot-rhel.done: $(call depv,CACHE_RHEL)
$(BUILD_DIR)/iso/isoroot-rhel.done: \
$(BUILD_DIR)/mirror/build.done \
$(BUILD_DIR)/packages/build.done \
$(BUILD_DIR)/iso/isoroot-dotfiles.done
ifeq ($(CACHE_RHEL),1)
mkdir -p $(ISOROOT)/rhel
rsync -rp $(LOCAL_MIRROR_RHEL)/ $(ISOROOT)/rhel
endif
$(ACTION.TOUCH)
$(BUILD_DIR)/iso/isoroot-eggs.done: \
$(BUILD_DIR)/mirror/build.done \
$(BUILD_DIR)/packages/build.done
mkdir -p $(ISOROOT)/eggs
rsync -a --delete $(LOCAL_MIRROR_EGGS)/ $(ISOROOT)/eggs
$(ACTION.TOUCH)
$(BUILD_DIR)/iso/isoroot-gems.done: \
$(BUILD_DIR)/mirror/build.done \
$(BUILD_DIR)/packages/build.done
mkdir -p $(ISOROOT)/gems
rsync -a --delete $(LOCAL_MIRROR_GEMS)/ $(ISOROOT)/gems
rsync -a $(BUILD_MIRROR_GEMS)/gems/ $(ISOROOT)/gems/gems
(cd $(ISOROOT)/gems && gem generate_index gems)
$(ACTION.TOUCH)
########################
# Extra files
########################
$(BUILD_DIR)/iso/isoroot-dotfiles.done: \
$(ISOROOT)/.discinfo \
$(ISOROOT)/.treeinfo
$(ACTION.TOUCH)
$(BUILD_DIR)/iso/isoroot-files.done: \
$(BUILD_DIR)/iso/isoroot-dotfiles.done \
$(ISOROOT)/isolinux/isolinux.cfg \
$(ISOROOT)/isolinux/splash.jpg \
$(ISOROOT)/ks.cfg \
$(ISOROOT)/bootstrap_admin_node.sh \
$(ISOROOT)/bootstrap_admin_node.conf \
$(ISOROOT)/send2syslog.py \
$(ISOROOT)/version.yaml \
$(ISOROOT)/puppet-slave.tgz
$(ACTION.TOUCH)
$(ISOROOT)/.discinfo: $(SOURCE_DIR)/iso/.discinfo ; $(ACTION.COPY)
$(ISOROOT)/.treeinfo: $(SOURCE_DIR)/iso/.treeinfo ; $(ACTION.COPY)
$(ISOROOT)/isolinux/isolinux.cfg: $(SOURCE_DIR)/iso/isolinux/isolinux.cfg ; $(ACTION.COPY)
$(ISOROOT)/isolinux/splash.jpg: $(SOURCE_DIR)/iso/isolinux/splash.jpg ; $(ACTION.COPY)
$(ISOROOT)/ks.cfg: $(call depv,KSYAML)
$(ISOROOT)/ks.cfg: $(SOURCE_DIR)/iso/ks.template $(SOURCE_DIR)/iso/ks.py $(KSYAML)
python $(SOURCE_DIR)/iso/ks.py -t $(SOURCE_DIR)/iso/ks.template -c $(KSYAML) -o $@
$(ISOROOT)/bootstrap_admin_node.sh: $(SOURCE_DIR)/iso/bootstrap_admin_node.sh ; $(ACTION.COPY)
$(ISOROOT)/bootstrap_admin_node.conf: $(SOURCE_DIR)/iso/bootstrap_admin_node.conf ; $(ACTION.COPY)
$(ISOROOT)/send2syslog.py: $(SOURCE_DIR)/bin/send2syslog.py ; $(ACTION.COPY)
$(ISOROOT)/version.yaml: $(call depv,COMMIT_SHA)
$(ISOROOT)/version.yaml: $(call depv,PRODUCT_VERSION)
$(ISOROOT)/version.yaml: $(call depv,FUEL_COMMIT_SHA)
$(ISOROOT)/version.yaml:
echo "COMMIT_SHA: $(COMMIT_SHA)" > $@
echo "PRODUCT_VERSION: $(PRODUCT_VERSION)" >> $@
echo "FUEL_COMMIT_SHA: $(FUEL_COMMIT_SHA)" >> $@
$(ISOROOT)/puppet-slave.tgz: \
$(call find-files,$(SOURCE_DIR)/fuel/deployment/puppet)
(cd $(SOURCE_DIR)/fuel/deployment/puppet && tar rf $(ISOROOT)/puppet-slave.tar ./*)
gzip -c -9 $(ISOROOT)/puppet-slave.tar > $@ && \
rm $(ISOROOT)/puppet-slave.tar
########################
# Bootstrap image.
########################
BOOTSTRAP_FILES:=initramfs.img linux
$(BUILD_DIR)/iso/isoroot-bootstrap.done: \
$(ISOROOT)/bootstrap/bootstrap.rsa \
$(addprefix $(ISOROOT)/bootstrap/, $(BOOTSTRAP_FILES))
$(ACTION.TOUCH)
$(addprefix $(ISOROOT)/bootstrap/, $(BOOTSTRAP_FILES)): \
$(BUILD_DIR)/bootstrap/build.done
@mkdir -p $(@D)
cp $(BUILD_DIR)/bootstrap/$(@F) $@
$(ISOROOT)/bootstrap/bootstrap.rsa: $(SOURCE_DIR)/bootstrap/ssh/id_rsa ; $(ACTION.COPY)
########################
# Iso image root file system.
########################
$(BUILD_DIR)/iso/isoroot.done: \
$(BUILD_DIR)/mirror/build.done \
$(BUILD_DIR)/packages/build.done \
$(BUILD_DIR)/iso/isoroot-centos.done \
$(BUILD_DIR)/iso/isoroot-ubuntu.done \
$(BUILD_DIR)/iso/isoroot-rhel.done \
$(BUILD_DIR)/iso/isoroot-eggs.done \
$(BUILD_DIR)/iso/isoroot-gems.done \
$(BUILD_DIR)/iso/isoroot-files.done \
$(BUILD_DIR)/iso/isoroot-bootstrap.done
$(ACTION.TOUCH)
########################
# Building CD and USB stick images
########################
# keep in mind that mkisofs touches some files inside the directory
# from which it builds the iso image;
# that is why we make isoroot.done depend on those files
# and then copy them into another directory
$(BUILD_DIR)/iso/iso.done: $(BUILD_DIR)/iso/isoroot.done
rm -f $(ISO_PATH)
mkdir -p $(BUILD_DIR)/iso/isoroot-mkisofs
rsync -a --delete $(ISOROOT)/ $(BUILD_DIR)/iso/isoroot-mkisofs
sudo sed -r -i -e "s/ip=[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}/ip=$(MASTER_IP)/" $(BUILD_DIR)/iso/isoroot-mkisofs/isolinux/isolinux.cfg
sudo sed -r -i -e "s/dns1=[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}/dns1=$(MASTER_DNS)/" $(BUILD_DIR)/iso/isoroot-mkisofs/isolinux/isolinux.cfg
sudo sed -r -i -e "s/netmask=[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}/netmask=$(MASTER_NETMASK)/" $(BUILD_DIR)/iso/isoroot-mkisofs/isolinux/isolinux.cfg
sudo sed -r -i -e "s/gw=[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}/gw=$(MASTER_GW)/" $(BUILD_DIR)/iso/isoroot-mkisofs/isolinux/isolinux.cfg
mkisofs -r -V "Mirantis Fuel" -p "Mirantis Inc." \
-J -T -R -b isolinux/isolinux.bin \
-no-emul-boot \
-boot-load-size 4 -boot-info-table \
-x "lost+found" -o $(ISO_PATH) $(BUILD_DIR)/iso/isoroot-mkisofs
implantisomd5 $(ISO_PATH)
$(ACTION.TOUCH)
# IMGSIZE is calculated as a sum of nailgun iso size plus
# installation images directory size (~165M) and syslinux directory size (~35M)
# plus a bit of free space for ext2 filesystem data
# +300M seems reasonable
IMGSIZE = $(shell echo "$(shell ls -s $(ISO_PATH) | awk '{print $$1}') * 1.3 / 1024" | bc)
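# For example (iso size assumed): a 2.5G iso is ~2621440 1K-blocks as
# reported by 'ls -s', so IMGSIZE = 2621440 * 1.3 / 1024 ~= 3328 (MiB)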
$(BUILD_DIR)/iso/img.done: $(BUILD_DIR)/iso/iso.done
rm -f $(BUILD_DIR)/iso/img_loop_device
rm -f $(BUILD_DIR)/iso/img_loop_partition
rm -f $(BUILD_DIR)/iso/img_loop_uuid
sudo losetup -j $(IMG_PATH) | awk -F: '{print $$1}' | while read loopdevice; do \
sudo kpartx -v $$loopdevice | awk '{print "/dev/mapper/" $$1}' | while read looppartition; do \
sudo umount -f $$looppartition; \
done; \
sudo kpartx -d $$loopdevice; \
sudo losetup -d $$loopdevice; \
done
rm -f $(IMG_PATH)
dd if=/dev/zero of=$(IMG_PATH) bs=1M count=$(IMGSIZE)
sudo losetup -f > $(BUILD_DIR)/iso/img_loop_device
sudo losetup `cat $(BUILD_DIR)/iso/img_loop_device` $(IMG_PATH)
sudo parted -s `cat $(BUILD_DIR)/iso/img_loop_device` mklabel msdos
sudo parted -s `cat $(BUILD_DIR)/iso/img_loop_device` unit MB mkpart primary ext2 1 $(IMGSIZE) set 1 boot on
sudo kpartx -a -v `cat $(BUILD_DIR)/iso/img_loop_device` | awk '{print "/dev/mapper/" $$3}' > $(BUILD_DIR)/iso/img_loop_partition
sleep 1
sudo mkfs.ext2 `cat $(BUILD_DIR)/iso/img_loop_partition`
mkdir -p $(BUILD_DIR)/iso/imgroot
sudo mount `cat $(BUILD_DIR)/iso/img_loop_partition` $(BUILD_DIR)/iso/imgroot
sudo extlinux -i $(BUILD_DIR)/iso/imgroot
sudo /sbin/blkid -s UUID -o value `cat $(BUILD_DIR)/iso/img_loop_partition` > $(BUILD_DIR)/iso/img_loop_uuid
sudo dd conv=notrunc bs=440 count=1 if=/usr/lib/extlinux/mbr.bin of=`cat $(BUILD_DIR)/iso/img_loop_device`
sudo cp -r $(BUILD_DIR)/iso/isoroot/images $(BUILD_DIR)/iso/imgroot
sudo cp -r $(BUILD_DIR)/iso/isoroot/isolinux $(BUILD_DIR)/iso/imgroot
sudo mv $(BUILD_DIR)/iso/imgroot/isolinux $(BUILD_DIR)/iso/imgroot/syslinux
sudo rm $(BUILD_DIR)/iso/imgroot/syslinux/isolinux.cfg
sudo cp $(SOURCE_DIR)/iso/syslinux/syslinux.cfg $(BUILD_DIR)/iso/imgroot/syslinux # NOTE(mihgen): Is it used for IMG file? Comments needed!
sudo sed -i -e "s/will_be_substituted_with_actual_uuid/`cat $(BUILD_DIR)/iso/img_loop_uuid`/g" $(BUILD_DIR)/iso/imgroot/syslinux/syslinux.cfg
sudo sed -r -i -e "s/ip=[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}/ip=$(MASTER_IP)/" $(BUILD_DIR)/iso/imgroot/syslinux/syslinux.cfg
sudo sed -r -i -e "s/dns1=[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}/dns1=$(MASTER_DNS)/" $(BUILD_DIR)/iso/imgroot/syslinux/syslinux.cfg
sudo sed -r -i -e "s/netmask=[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}/netmask=$(MASTER_NETMASK)/" $(BUILD_DIR)/iso/imgroot/syslinux/syslinux.cfg
sudo sed -r -i -e "s/gw=[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}/gw=$(MASTER_GW)/" $(BUILD_DIR)/iso/imgroot/syslinux/syslinux.cfg
sudo cp $(BUILD_DIR)/iso/isoroot/ks.cfg $(BUILD_DIR)/iso/imgroot/ks.cfg
sudo sed -i -e "s/will_be_substituted_with_actual_uuid/`cat $(BUILD_DIR)/iso/img_loop_uuid`/g" $(BUILD_DIR)/iso/imgroot/ks.cfg
sudo cp $(ISO_PATH) $(BUILD_DIR)/iso/imgroot/nailgun.iso
sudo sync
sudo umount `cat $(BUILD_DIR)/iso/img_loop_partition`
sudo kpartx -d `cat $(BUILD_DIR)/iso/img_loop_device`
sudo losetup -d `cat $(BUILD_DIR)/iso/img_loop_device`
$(ACTION.TOUCH)
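To sanity-check the finished image, one can attach it back to a loop device with partition scanning and mount the first partition. A sketch, assuming a reasonably recent util-linux (older hosts would repeat the kpartx dance used above) and a hypothetical image path:

    #!/bin/bash
    set -e
    IMG=build/iso/fuelweb.img                 # hypothetical path to the image
    LOOP=$(sudo losetup -Pf --show "$IMG")    # e.g. /dev/loop0
    sudo mount "${LOOP}p1" /mnt
    ls /mnt/syslinux /mnt/ks.cfg /mnt/nailgun.iso
    sudo umount /mnt
    sudo losetup -d "$LOOP"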

View File

@ -1,22 +0,0 @@
default vesamenu.c32
#prompt 1
timeout 300
display boot.msg
menu background splash.jpg
menu title Welcome to Fuel Installer!
menu color border 0 #ffffffff #00000000
menu color sel 7 #ffffffff #ff000000
menu color title 0 #ffffffff #00000000
menu color tabmsg 0 #ffffffff #00000000
menu color unsel 0 #ffffffff #00000000
menu color hotsel 0 #ff000000 #ffffffff
menu color hotkey 7 #ffffffff #ff000000
menu color scrollbar 0 #ffffffff #00000000
label nailgunstatic
menu label Fuel Install (^Static IP)
menu default
kernel vmlinuz
append initrd=initrd.img biosdevname=0 repo=hd:UUID=will_be_substituted_with_actual_uuid:/ ks=hd:UUID=will_be_substituted_with_actual_uuid:/ks.cfg ip=10.20.0.2 gw=10.20.0.1 dns1=10.20.0.1 netmask=255.255.255.0 hostname=fuel.domain.tld

View File

@ -1,24 +0,0 @@
ISOLINUX_FILES:=boot.msg grub.conf initrd.img isolinux.bin memtest vesamenu.c32 vmlinuz
IMAGES_FILES:=efiboot.img efidisk.img install.img
EFI_FILES:=BOOTX64.conf BOOTX64.efi splash.xpm.gz
# centos isolinux files
$(addprefix $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/isolinux/,$(ISOLINUX_FILES)):
@mkdir -p $(@D)
wget -O $@ $(MIRROR_CENTOS_OS_BASEURL)/isolinux/$(@F)
# centos EFI boot images
$(addprefix $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/EFI/BOOT/,$(EFI_FILES)):
@mkdir -p $(@D)
wget -O $@ $(MIRROR_CENTOS_OS_BASEURL)/EFI/BOOT/$(@F)
# centos boot images
$(addprefix $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/images/,$(IMAGES_FILES)):
@mkdir -p $(@D)
wget -O $@ $(MIRROR_CENTOS_OS_BASEURL)/images/$(@F)
$(BUILD_DIR)/mirror/centos/boot.done: \
$(addprefix $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/images/,$(IMAGES_FILES)) \
$(addprefix $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/EFI/BOOT/,$(EFI_FILES)) \
$(addprefix $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/isolinux/,$(ISOLINUX_FILES))
$(ACTION.TOUCH)
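The three pattern rules above are just batched downloads: each generated target fetches the file of the same name ($(@F)) from the upstream mirror into the matching local directory. A rough shell equivalent of the isolinux rule, with placeholder URLs and paths:

    #!/bin/bash
    set -e
    MIRROR=http://mirror.example.com/centos/os/x86_64    # placeholder
    DEST=local_mirror/centos/os/x86_64                   # placeholder
    mkdir -p "$DEST/isolinux"
    for f in boot.msg grub.conf initrd.img isolinux.bin memtest vesamenu.c32 vmlinuz; do
        wget -O "$DEST/isolinux/$f" "$MIRROR/isolinux/$f"
    done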

View File

@ -1,9 +0,0 @@
# This module downloads required rpm packages and creates an rpm repository.
include $(SOURCE_DIR)/mirror/centos/repo.mk
# This module downloads centos installation images.
include $(SOURCE_DIR)/mirror/centos/boot.mk
$(BUILD_DIR)/mirror/centos/build.done: \
$(BUILD_DIR)/mirror/centos/repo.done \
$(BUILD_DIR)/mirror/centos/boot.done
$(ACTION.TOUCH)

View File

@ -1,80 +0,0 @@
include $(SOURCE_DIR)/mirror/centos/yum_repos.mk
.PHONY: show-yum-urls-centos
$(BUILD_DIR)/mirror/centos/etc/yum.conf: $(call depv,yum_conf)
$(BUILD_DIR)/mirror/centos/etc/yum.conf: export contents:=$(yum_conf)
$(BUILD_DIR)/mirror/centos/etc/yum.conf:
mkdir -p $(@D)
/bin/echo -e "$${contents}" > $@
$(BUILD_DIR)/mirror/centos/etc/yum-plugins/priorities.py: \
$(SOURCE_DIR)/mirror/centos/yum-priorities-plugin.py
mkdir -p $(@D)
cp $(SOURCE_DIR)/mirror/centos/yum-priorities-plugin.py $@
$(BUILD_DIR)/mirror/centos/etc/yum/pluginconf.d/priorities.conf:
mkdir -p $(@D)
/bin/echo -e "[main]\nenabled=1\ncheck_obsoletes=1\nfull_match=1" > $@
$(BUILD_DIR)/mirror/centos/etc/yum.repos.d/base.repo: $(call depv,YUM_REPOS)
$(BUILD_DIR)/mirror/centos/etc/yum.repos.d/base.repo: \
export contents:=$(foreach repo,$(YUM_REPOS),\n$(yum_repo_$(repo))\n)
$(BUILD_DIR)/mirror/centos/etc/yum.repos.d/base.repo:
@mkdir -p $(@D)
/bin/echo -e "$${contents}" > $@
$(BUILD_DIR)/mirror/centos/yum-config.done: \
$(BUILD_DIR)/mirror/centos/etc/yum.conf \
$(BUILD_DIR)/mirror/centos/etc/yum.repos.d/base.repo \
$(BUILD_DIR)/mirror/centos/etc/yum-plugins/priorities.py \
$(BUILD_DIR)/mirror/centos/etc/yum/pluginconf.d/priorities.conf
$(ACTION.TOUCH)
$(BUILD_DIR)/mirror/centos/yum.done: \
$(BUILD_DIR)/mirror/centos/yum-config.done \
$(SOURCE_DIR)/requirements-rpm.txt
yum -c $(BUILD_DIR)/mirror/centos/etc/yum.conf clean all
rm -rf /var/tmp/yum-$$USER-*/
yumdownloader -q --resolve --archlist=$(CENTOS_ARCH) \
-c $(BUILD_DIR)/mirror/centos/etc/yum.conf \
--destdir=$(LOCAL_MIRROR_CENTOS_OS_BASEURL)/Packages \
$(REQUIRED_RPMS) 2>&1 | tee $(BUILD_DIR)/mirror/centos/yumdownloader.log
# Yumdownloader/repotrack workaround number one:
# i686 packages are downloaded by mistake. Remove them
rm -rf $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/Packages/*.i686.rpm
# Yumdownloader workaround number two:
# yumdownloader exits successfully even if some packages are missing,
# so fail the build ourselves if the log contains any "No Match" lines
test `grep "No Match" $(BUILD_DIR)/mirror/centos/yumdownloader.log | wc -l` = 0
# Yumdownloader workaround number three:
# We expect exactly two depsolving conflicts (django and mysql); anything else fails the build
test `grep "has depsolving problems" $(BUILD_DIR)/mirror/centos/yumdownloader.log | wc -l` = 2
$(ACTION.TOUCH)
show-yum-urls-centos: \
$(BUILD_DIR)/mirror/centos/yum-config.done \
$(SOURCE_DIR)/requirements-rpm.txt
yum -c $(BUILD_DIR)/mirror/centos/etc/yum.conf clean all
rm -rf /var/tmp/yum-$$USER-*/
yumdownloader --urls -q --resolve --archlist=$(CENTOS_ARCH) \
-c $(BUILD_DIR)/mirror/centos/etc/yum.conf \
--destdir=$(LOCAL_MIRROR_CENTOS_OS_BASEURL)/Packages \
$(REQUIRED_RPMS)
$(LOCAL_MIRROR_CENTOS_OS_BASEURL)/comps.xml: \
export COMPSXML=$(shell wget -qO- $(MIRROR_CENTOS_OS_BASEURL)/repodata/repomd.xml | grep -m 1 '$(@F)' | awk -F'"' '{ print $$2 }')
$(LOCAL_MIRROR_CENTOS_OS_BASEURL)/comps.xml:
@mkdir -p $(@D)
if ( echo $${COMPSXML} | grep -q '\.gz$$' ); then \
wget -O $@.gz $(MIRROR_CENTOS_OS_BASEURL)/$${COMPSXML}; \
gunzip $@.gz; \
else \
wget -O $@ $(MIRROR_CENTOS_OS_BASEURL)/$${COMPSXML}; \
fi
$(BUILD_DIR)/mirror/centos/repo.done: \
$(BUILD_DIR)/mirror/centos/yum.done \
| $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/comps.xml
createrepo -g $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/comps.xml \
-o $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/ $(LOCAL_MIRROR_CENTOS_OS_BASEURL)/
$(ACTION.TOUCH)
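Taken together, these targets build a self-contained package mirror: yumdownloader resolves and downloads the closure of REQUIRED_RPMS through the generated yum config, and createrepo publishes the result as a repository with group metadata from comps.xml. A condensed sketch of the same flow with illustrative names:

    #!/bin/bash
    set -e
    CONF=etc/yum.conf                      # the generated config from above
    DEST=local_mirror/centos/os/x86_64     # placeholder mirror root
    yum -c "$CONF" clean all
    yumdownloader -q --resolve --archlist=x86_64 -c "$CONF" \
        --destdir="$DEST/Packages" ruby nginx   # example package names
    createrepo -g "$DEST/comps.xml" -o "$DEST" "$DEST"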

View File

@ -1,209 +0,0 @@
#!/usr/bin/python
#
# yum-plugin-priorities 0.0.7
#
# Copyright (c) 2006-2007 Daniel de Kok
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# This plugin is inspired by the protectbase plugin, and enables/disables
# packages based on a repository priority.
#
# You can install this plugin by copying it to /usr/lib/yum-plugins. To
# enable this plugin, make sure that you have 'plugins=1' in /etc/yum.conf,
# and create the file /etc/yum/pluginconf.d/priorities.conf with the
# following content:
#
# [main]
# enabled=1
#
# If you also want the plugin to protect high-priority repositories against
# obsoletes in low-priority repositories, enable the 'check_obsoletes' bool:
#
# check_obsoletes=1
#
# By default, this plugin excludes packages from lower priority repositories
# based on the package name. If you want to exclude packages based only on the
# package name and architecture, enable the 'only_samearch' bool:
#
# only_samearch=1
#
# If you want packages to be matched by their fully qualified name,
# including architecture and package version, enable the 'full_match' bool:
#
# full_match=1
#
# If full_match is enabled then check_obsoletes will be forcibly disabled
#
# You can add priorities to repositories, by adding the line:
#
# priority=N
#
# to the repository entry, where N is an integer number. The default
# priority for repositories is 99. The repositories with the lowest
# number have the highest priority.
#
# Please report errors to Daniel de Kok <danieldk@pobox.com>
from yum.constants import *
from yum.plugins import TYPE_CORE
from yum import config
import yum
check_obsoletes = False
only_samearch = False
full_match = False
requires_api_version = '2.1'
plugin_type = (TYPE_CORE,)
def config_hook(conduit):
global check_obsoletes
global only_samearch
global full_match
# Plugin configuration
check_obsoletes = conduit.confBool('main', 'check_obsoletes', default = False)
only_samearch = conduit.confBool('main', 'only_samearch', default = False)
full_match = conduit.confBool('main', 'full_match', default = False)
if full_match:
check_obsoletes = False
# Repo priorities
if yum.__version__ >= '2.5.0':
# New style : yum >= 2.5
config.RepoConf.priority = config.IntOption(99)
else:
# Old style: register the option via conduit.registerOpt
conduit.registerOpt('priority', PLUG_OPT_INT, PLUG_OPT_WHERE_REPO, 99)
# Command-line options.
parser = conduit.getOptParser()
if parser:
if hasattr(parser, 'plugin_option_group'):
parser = parser.plugin_option_group
parser.add_option('', '--samearch-priorities', dest='samearch',
action='store_true', default = False,
help="Priority-exclude packages based on name + arch")
def _all_repo_priorities_same(allrepos):
""" Are all repos at the same priority """
first = None
for repo in allrepos:
if first is None:
first = repo.priority
elif first != repo.priority:
return False
return True
def exclude_hook(conduit):
global only_samearch
global check_obsoletes
global full_match
allrepos = conduit.getRepos().listEnabled()
# If they haven't done anything, don't do any work
if _all_repo_priorities_same(allrepos):
return
# Check whether the user specified the --samearch option.
opts, commands = conduit.getCmdLine()
if opts and opts.samearch:
only_samearch = True
cnt = 0
if check_obsoletes and not conduit._base.conf.obsoletes:
check_obsoletes = False
if check_obsoletes:
obsoletes = conduit._base.up.rawobsoletes
# Build a dictionary with package priorities. Either with arch or
# archless, based on the user's settings.
if only_samearch:
pkg_priorities = dict()
if check_obsoletes or not only_samearch:
pkg_priorities_archless = dict()
for repo in allrepos:
if repo.enabled:
if only_samearch:
repopkgs = _pkglist_to_dict(conduit.getPackages(repo), repo.priority, True)
_mergeprioritydicts(pkg_priorities, repopkgs)
if check_obsoletes or not only_samearch:
repopkgs_archless = _pkglist_to_dict(conduit.getPackages(repo), repo.priority)
_mergeprioritydicts(pkg_priorities_archless, repopkgs_archless)
# Eliminate packages that have a low priority
for repo in allrepos:
if repo.enabled:
for po in conduit.getPackages(repo):
delPackage = False
if full_match:
pname = str(po)
else:
pname = po.name
if only_samearch:
key = "%s.%s" % (pname,po.arch)
if key in pkg_priorities and pkg_priorities[key] < repo.priority:
delPackage = True
else:
key = "%s" % pname
if key in pkg_priorities_archless and pkg_priorities_archless[key] < repo.priority:
delPackage = True
if delPackage:
conduit.delPackage(po)
cnt += 1
conduit.info(3," --> %s from %s excluded (priority)" % (po,po.repoid))
# If this package obsoletes other packages, check whether any of
# the obsoleted packages is available through a repo with a higher
# priority. If so, remove this package.
if check_obsoletes:
if po.pkgtup in obsoletes:
obsolete_pkgs = obsoletes[po.pkgtup]
for obsolete_pkg in obsolete_pkgs:
pkg_name = obsolete_pkg[0]
if pkg_name in pkg_priorities_archless and pkg_priorities_archless[pkg_name] < repo.priority:
conduit.delPackage(po)
cnt += 1
conduit.info(3," --> %s from %s excluded (priority)" % (po,po.repoid))
break
if cnt:
conduit.info(2, '%d packages excluded due to repository priority protections' % cnt)
if check_obsoletes:
# Atm. the update object doesn't get updated when we manually exclude
# things ... so delete it. This needs to be re-written.
conduit._base.up = None
def _pkglist_to_dict(pl, priority, addArch = False):
global full_match
out = dict()
for p in pl:
if full_match:
pname = str(p)
else:
pname = p.name
if addArch:
key = "%s.%s" % (pname,p.arch)
out[key] = priority
else:
out[pname] = priority
return out
def _mergeprioritydicts(dict1, dict2):
for package in dict2.keys():
if package not in dict1 or dict2[package] < dict1[package]:
dict1[package] = dict2[package]
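In practice the plugin is driven entirely by configuration, as its header comment describes. A sketch of enabling it and pinning one repository above the default priority of 99 (run as root; the repo name and URL are illustrative):

    #!/bin/bash
    set -e
    # Plugin config: enable it and protect high-priority repos from obsoletes.
    printf '[main]\nenabled=1\ncheck_obsoletes=1\n' \
        > /etc/yum/pluginconf.d/priorities.conf
    # Lower number wins: this repo now shadows same-named packages elsewhere.
    {
        echo '[example]'
        echo 'name=Example high-priority repo'
        echo 'baseurl=http://mirror.example.com/packages'
        echo 'gpgcheck=0'
        echo 'enabled=1'
        echo 'priority=1'
    } > /etc/yum.repos.d/example.repo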

View File

@ -1,76 +0,0 @@
define yum_conf
[main]
cachedir=$(BUILD_DIR)/mirror/centos/cache
keepcache=0
debuglevel=6
logfile=$(BUILD_DIR)/mirror/centos/yum.log
exclude=*.i686.rpm
exactarch=1
obsoletes=1
gpgcheck=0
plugins=1
pluginpath=$(BUILD_DIR)/mirror/centos/etc/yum-plugins
pluginconfpath=$(BUILD_DIR)/mirror/centos/etc/yum/pluginconf.d
reposdir=$(BUILD_DIR)/mirror/centos/etc/yum.repos.d
endef
define yum_repo_official
[base]
name=CentOS-$(CENTOS_RELEASE) - Base
#mirrorlist=http://mirrorlist.centos.org/?release=$(CENTOS_RELEASE)&arch=$(CENTOS_ARCH)&repo=os
baseurl=$(MIRROR_CENTOS)/os/$(CENTOS_ARCH)
gpgcheck=0
enabled=1
priority=10
[updates]
name=CentOS-$(CENTOS_RELEASE) - Updates
#mirrorlist=http://mirrorlist.centos.org/?release=$(CENTOS_RELEASE)&arch=$(CENTOS_ARCH)&repo=updates
baseurl=$(MIRROR_CENTOS)/updates/$(CENTOS_ARCH)
gpgcheck=0
enabled=1
priority=10
[extras]
name=CentOS-$(CENTOS_RELEASE) - Extras
#mirrorlist=http://mirrorlist.centos.org/?release=$(CENTOS_RELEASE)&arch=$(CENTOS_ARCH)&repo=extras
baseurl=$(MIRROR_CENTOS)/extras/$(CENTOS_ARCH)
gpgcheck=0
enabled=0
priority=10
[centosplus]
name=CentOS-$(CENTOS_RELEASE) - Plus
#mirrorlist=http://mirrorlist.centos.org/?release=$(CENTOS_RELEASE)&arch=$(CENTOS_ARCH)&repo=centosplus
baseurl=$(MIRROR_CENTOS)/centosplus/$(CENTOS_ARCH)
gpgcheck=0
enabled=0
priority=10
[contrib]
name=CentOS-$(CENTOS_RELEASE) - Contrib
#mirrorlist=http://mirrorlist.centos.org/?release=$(CENTOS_RELEASE)&arch=$(CENTOS_ARCH)&repo=contrib
baseurl=$(MIRROR_CENTOS)/contrib/$(CENTOS_ARCH)
gpgcheck=0
enabled=0
priority=10
endef
define yum_repo_fuel
[fuel]
name=Mirantis OpenStack Custom Packages
#mirrorlist=http://download.mirantis.com/epel-fuel-grizzly-3.1/mirror.internal.list
baseurl=$(MIRROR_FUEL)
gpgcheck=0
enabled=1
priority=1
endef
define yum_repo_proprietary
[proprietary]
name = RHEL $(CENTOS_RELEASE) - Proprietary
baseurl = $(MIRROR_CENTOS)/os/$(CENTOS_ARCH)
gpgcheck = 0
enabled = 1
priority=1
endef

View File

@ -1,61 +0,0 @@
.PHONY: clean clean-mirror-eggs mirror-eggs
mirror-eggs: $(BUILD_DIR)/mirror/eggs/build.done
clean: clean-mirror-eggs
clean-mirror-eggs:
-sudo umount $(BUILD_DIR)/mirror/eggs/SANDBOX/proc
-sudo umount $(BUILD_DIR)/mirror/eggs/SANDBOX/dev
sudo rm -rf $(BUILD_DIR)/mirror/eggs
$(BUILD_DIR)/mirror/eggs/build.done: $(call depv,LOCAL_MIRROR_EGGS)
$(BUILD_DIR)/mirror/eggs/build.done: $(call depv,REQUIRED_EGGS)
$(BUILD_DIR)/mirror/eggs/build.done: $(call depv,OSTF_EGGS)
$(BUILD_DIR)/mirror/eggs/build.done: $(call depv,SANDBOX_PACKAGES)
$(BUILD_DIR)/mirror/eggs/build.done: SANDBOX:=$(BUILD_DIR)/mirror/eggs/SANDBOX
$(BUILD_DIR)/mirror/eggs/build.done: export SANDBOX_UP:=$(SANDBOX_UP)
$(BUILD_DIR)/mirror/eggs/build.done: export SANDBOX_DOWN:=$(SANDBOX_DOWN)
$(BUILD_DIR)/mirror/eggs/build.done: \
$(BUILD_DIR)/mirror/centos/build.done
mkdir -p $(@D)
sudo sh -c "$${SANDBOX_UP}"
# Creating eggs mirror directory
mkdir -p $(LOCAL_MIRROR_EGGS)
# Avoiding eggs download duplication.
sudo rsync -a --delete $(LOCAL_MIRROR_EGGS) $(SANDBOX)/tmp
# Here we don't know whether MIRROR_EGGS
# is a list of links or a proper pip index.
# That is why we pass the same URL to both
# the --index-url and --find-links options.
# Installing new version of pip.
sudo chroot $(SANDBOX) pip --version 2>/dev/null | awk '{print $$2}' | grep -qE "^1\.2\.1$$" || \
sudo chroot $(SANDBOX) pip-python install \
--index-url $(MIRROR_EGGS) \
--find-links $(MIRROR_EGGS) \
pip==1.2.1
# Downloading required pip packages.
sudo chroot $(SANDBOX) pip install \
--exists-action=i \
--index-url $(MIRROR_EGGS) \
--find-links $(MIRROR_EGGS) \
--download /tmp/$(notdir $(LOCAL_MIRROR_EGGS)) \
$(REQUIRED_EGGS)
sudo chroot $(SANDBOX) pip install \
--exists-action=i \
--index-url $(MIRROR_EGGS) \
--find-links $(MIRROR_EGGS) \
--download /tmp/$(notdir $(LOCAL_MIRROR_EGGS)) \
$(OSTF_EGGS)
# Copying downloaded eggs into eggs mirror
rsync -a $(SANDBOX)/tmp/$(notdir $(LOCAL_MIRROR_EGGS))/ $(LOCAL_MIRROR_EGGS)
sudo sh -c "$${SANDBOX_DOWN}"
$(ACTION.TOUCH)
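The --download idiom above is how pip 1.x populated a mirror without installing anything; on a modern pip the equivalent is the `pip download` subcommand. A sketch with a placeholder index URL and an example pin:

    #!/bin/bash
    set -e
    MIRROR=http://pypi.example.com/simple    # placeholder index / link page
    DEST=local_mirror/eggs
    mkdir -p "$DEST"
    # Fetch the distribution plus its dependencies into the mirror directory.
    pip download --index-url "$MIRROR" --find-links "$MIRROR" \
        --dest "$DEST" amqplib==1.0.2        # example pin
    # Consumers later install offline, straight from the mirror:
    pip install --no-index --find-links "$DEST" amqplib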

View File

@ -1,54 +0,0 @@
$(BUILD_DIR)/mirror/gems/gems-bundle/Gemfile: $(call depv,MIRROR_GEMS)
$(BUILD_DIR)/mirror/gems/gems-bundle/Gemfile:
mkdir -p $(@D)
echo -n > $@
for i in $(MIRROR_GEMS); do \
echo "source \"$$i\"" >> $@; \
done
$(BUILD_DIR)/mirror/gems/gems-bundle/naily/Gemfile: $(call depv,MIRROR_GEMS)
$(BUILD_DIR)/mirror/gems/gems-bundle/naily/Gemfile: \
$(BUILD_DIR)/mirror/gems/gems-bundle/naily/Gemfile.lock \
$(BUILD_DIR)/packages/gems/build.done \
$(BUILD_DIR)/packages/rpm/build.done
echo -n > $@
for i in $(MIRROR_GEMS); do \
echo "source \"$$i\"" >> $@; \
done
echo "source \"file://$(BUILD_MIRROR_GEMS)\"" >> $@
echo "gemspec :path => \"$(SOURCE_DIR)/naily\"" >> $@
$(ACTION.TOUCH)
$(BUILD_DIR)/mirror/gems/gems-bundle/naily/Gemfile.lock: \
$(SOURCE_DIR)/naily/Gemfile.lock
mkdir -p $(@D)
cp $(SOURCE_DIR)/naily/Gemfile.lock $@
$(BUILD_DIR)/mirror/gems/gems-bundle-gemfile.done: \
$(SOURCE_DIR)/requirements-gems.txt \
$(BUILD_DIR)/mirror/gems/gems-bundle/Gemfile \
$(BUILD_DIR)/mirror/gems/gems-bundle/naily/Gemfile
mkdir -p $(BUILD_DIR)/mirror/gems/gems-bundle
cat $(SOURCE_DIR)/requirements-gems.txt | while read gem ver; do \
echo "gem \"$${gem}\", \"$${ver}\"" >> $(BUILD_DIR)/mirror/gems/gems-bundle/Gemfile; \
done
$(ACTION.TOUCH)
$(BUILD_DIR)/mirror/gems/gems-bundle.done: $(BUILD_DIR)/mirror/gems/gems-bundle-gemfile.done
( cd $(BUILD_DIR)/mirror/gems/gems-bundle && bundle install --path=. && bundle package )
find $(BUILD_DIR)/mirror/gems/gems-bundle/naily \( -name "astute*.gem*" \) -exec rm '{}' \+
( cd $(BUILD_DIR)/mirror/gems/gems-bundle/naily && bundle install --path=. && bundle package )
( cd $(BUILD_DIR)/mirror/gems/gems-bundle/vendor/cache/ && \
gem fetch `for i in $(MIRROR_GEMS); do echo -n "--source $$i "; done` -v 1.3.4 bundler )
$(ACTION.TOUCH)
$(BUILD_DIR)/mirror/gems/build.done: $(call depv,LOCAL_MIRROR_GEMS)
$(BUILD_DIR)/mirror/gems/build.done: $(call depv,BUILD_MIRROR_GEMS)
$(BUILD_DIR)/mirror/gems/build.done: $(BUILD_DIR)/mirror/gems/gems-bundle.done
@mkdir -p $(LOCAL_MIRROR_GEMS)/gems
cp $(BUILD_DIR)/mirror/gems/gems-bundle/vendor/cache/*.gem $(LOCAL_MIRROR_GEMS)/gems
find $(BUILD_DIR)/mirror/gems/gems-bundle/naily/vendor/cache/ \
\( -name "*.gem" -a ! -name "astute*" -a ! -name "mcollective*" -a ! -name "raemon*" \) \
-exec cp '{}' $(LOCAL_MIRROR_GEMS)/gems \;
(cd $(LOCAL_MIRROR_GEMS) && gem generate_index gems)
$(ACTION.TOUCH)
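The net effect of the gems targets is an ordinary static gem mirror: every .gem collected by bundler lands in a gems/ subdirectory and an index is generated next to it, after which the directory can be served as a gem source. A rough shell equivalent with placeholder paths:

    #!/bin/bash
    set -e
    MIRROR=local_mirror/gems          # placeholder mirror root
    mkdir -p "$MIRROR/gems"
    # Fetch a pinned gem from the default source (bundler 1.3.4, the same
    # version the recipe above pins).
    gem fetch bundler -v 1.3.4
    mv bundler-1.3.4.gem "$MIRROR/gems/"
    # Generate the repository indexes; --directory expects the directory
    # that contains the gems/ subdirectory.
    gem generate_index --directory "$MIRROR"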

View File

@ -1,25 +0,0 @@
.PHONY: mirror clean clean-mirror
mirror: $(BUILD_DIR)/mirror/build.done
clean: clean-mirror
clean-mirror: clean-mirror-eggs
sudo rm -rf $(BUILD_DIR)/mirror
include $(SOURCE_DIR)/mirror/src/module.mk
include $(SOURCE_DIR)/mirror/centos/module.mk
include $(SOURCE_DIR)/mirror/ubuntu/module.mk
include $(SOURCE_DIR)/mirror/rhel/module.mk
include $(SOURCE_DIR)/mirror/eggs/module.mk
include $(SOURCE_DIR)/mirror/gems/module.mk
$(BUILD_DIR)/mirror/build.done: \
$(BUILD_DIR)/mirror/src/build.done \
$(BUILD_DIR)/mirror/centos/build.done \
$(BUILD_DIR)/mirror/ubuntu/build.done \
$(BUILD_DIR)/mirror/rhel/build.done \
$(BUILD_DIR)/mirror/eggs/build.done \
$(BUILD_DIR)/mirror/gems/build.done
$(ACTION.TOUCH)

View File

@ -1,24 +0,0 @@
ISOLINUX_FILES:=boot.msg grub.conf initrd.img isolinux.bin memtest splash.jpg vesamenu.c32 vmlinuz
IMAGES_FILES:=efiboot.img efidisk.img install.img
EFI_FILES:=BOOTX64.conf BOOTX64.efi splash.xpm.gz
# isolinux files
$(addprefix $(LOCAL_MIRROR_RHEL)/isolinux/,$(ISOLINUX_FILES)):
@mkdir -p $(@D)
wget -O $@ $(MIRROR_RHEL_BOOT)/isolinux/$(@F)
# EFI boot images
$(addprefix $(LOCAL_MIRROR_RHEL)/EFI/BOOT/,$(EFI_FILES)):
@mkdir -p $(@D)
wget -O $@ $(MIRROR_RHEL_BOOT)/EFI/BOOT/$(@F)
# boot images
$(addprefix $(LOCAL_MIRROR_RHEL)/images/,$(IMAGES_FILES)):
@mkdir -p $(@D)
wget -O $@ $(MIRROR_RHEL_BOOT)/images/$(@F)
$(BUILD_DIR)/mirror/rhel/boot.done: \
$(addprefix $(LOCAL_MIRROR_RHEL)/images/,$(IMAGES_FILES)) \
$(addprefix $(LOCAL_MIRROR_RHEL)/EFI/BOOT/,$(EFI_FILES)) \
$(addprefix $(LOCAL_MIRROR_RHEL)/isolinux/,$(ISOLINUX_FILES))
$(ACTION.TOUCH)

View File

@ -1,14 +0,0 @@
ifeq ($(CACHE_RHEL),1)
# This module downloads required rpm packages and creates an rpm repository.
include $(SOURCE_DIR)/mirror/rhel/repo.mk
# This module downloads installation images.
include $(SOURCE_DIR)/mirror/rhel/boot.mk
$(BUILD_DIR)/mirror/rhel/build.done: \
$(BUILD_DIR)/mirror/rhel/repo.done \
$(BUILD_DIR)/mirror/rhel/boot.done
$(ACTION.TOUCH)
else
$(BUILD_DIR)/mirror/rhel/build.done:
$(ACTION.TOUCH)
endif

View File

@ -1,87 +0,0 @@
include $(SOURCE_DIR)/mirror/rhel/yum_repos.mk
.PHONY: show-yum-urls-rhel
$(BUILD_DIR)/mirror/rhel/etc/yum.conf: $(call depv,rhel_yum_conf)
$(BUILD_DIR)/mirror/rhel/etc/yum.conf: export contents:=$(rhel_yum_conf)
$(BUILD_DIR)/mirror/rhel/etc/yum.conf:
mkdir -p $(@D)
/bin/echo -e "$${contents}" > $@
$(BUILD_DIR)/mirror/rhel/etc/yum-plugins/priorities.py: \
$(SOURCE_DIR)/mirror/rhel/yum-priorities-plugin.py
mkdir -p $(@D)
cp $(SOURCE_DIR)/mirror/rhel/yum-priorities-plugin.py $@
$(BUILD_DIR)/mirror/rhel/etc/yum/pluginconf.d/priorities.conf:
mkdir -p $(@D)
/bin/echo -e "[main]\nenabled=1\ncheck_obsoletes=1\nfull_match=1" > $@
$(BUILD_DIR)/mirror/rhel/etc/yum.repos.d/base.repo: $(call depv,YUM_REPOS)
$(BUILD_DIR)/mirror/rhel/etc/yum.repos.d/base.repo: \
export contents:=$(foreach repo,$(YUM_REPOS),\n$(rhel_yum_repo_$(repo))\n)
$(BUILD_DIR)/mirror/rhel/etc/yum.repos.d/base.repo:
@mkdir -p $(@D)
/bin/echo -e "$${contents}" > $@
$(BUILD_DIR)/mirror/rhel/yum-config.done: \
$(BUILD_DIR)/mirror/rhel/etc/yum.conf \
$(BUILD_DIR)/mirror/rhel/etc/yum.repos.d/base.repo \
$(BUILD_DIR)/mirror/rhel/etc/yum-plugins/priorities.py \
$(BUILD_DIR)/mirror/rhel/etc/yum/pluginconf.d/priorities.conf
$(ACTION.TOUCH)
$(BUILD_DIR)/mirror/rhel/yum.done: $(call depv,REQ_RHEL_RPMS)
$(BUILD_DIR)/mirror/rhel/yum.done: \
$(BUILD_DIR)/mirror/rhel/yum-config.done
yum -c $(BUILD_DIR)/mirror/rhel/etc/yum.conf clean all
rm -rf /var/tmp/yum-$$USER-*/
yumdownloader -q --resolve --archlist=$(CENTOS_ARCH) \
-c $(BUILD_DIR)/mirror/rhel/etc/yum.conf \
--destdir=$(LOCAL_MIRROR_RHEL)/Packages \
`echo $(REQ_RHEL_RPMS) | /bin/sed 's/-[0-9][0-9\.a-zA-Z_-]\+//g'`
$(ACTION.TOUCH)
show-yum-urls-rhel: $(call depv,REQ_RHEL_RPMS)
show-yum-urls-rhel: \
$(BUILD_DIR)/mirror/rhel/yum-config.done
yum -c $(BUILD_DIR)/mirror/rhel/etc/yum.conf clean all
rm -rf /var/tmp/yum-$$USER-*/
yumdownloader --urls -q --resolve --archlist=$(CENTOS_ARCH) \
-c $(BUILD_DIR)/mirror/rhel/etc/yum.conf \
--destdir=$(LOCAL_MIRROR_RHEL)/Packages \
`echo $(REQ_RHEL_RPMS) | /bin/sed 's/-[0-9][0-9\.a-zA-Z_-]\+//g'`
$(LOCAL_MIRROR_RHEL)/comps.xml: \
export COMPSXML=$(shell wget -qO- $(MIRROR_RHEL)/repodata/repomd.xml | grep -m 1 '$(@F)' | awk -F'"' '{ print $$2 }')
$(LOCAL_MIRROR_RHEL)/comps.xml:
@mkdir -p $(@D)
if ( echo $${COMPSXML} | grep -q '\.gz$$' ); then \
wget -O $@.gz $(MIRROR_RHEL)/$${COMPSXML}; \
gunzip $@.gz; \
else \
wget -O $@ $(MIRROR_RHEL)/$${COMPSXML}; \
fi
# These packages are used by FUEL but the RHEL repo doesn't contain them,
# so we need to download them from an external repo and put them into the rhel/fuel repo.
HACK_PACKAGES:=xinetd-2.3.14-38.el6.x86_64.rpm xfsprogs-3.1.1-10.el6.x86_64.rpm qpid-cpp-server-cluster-0.14-22.el6_3.x86_64.rpm \
qpid-cpp-server-store-0.14-22.el6_3.x86_64.rpm qpid-tests-0.14-1.el6_2.noarch.rpm qpid-tools-0.14-6.el6_3.noarch.rpm \
qpid-cpp-server-ssl-0.14-22.el6_3.x86_64.rpm
HACK_URLS:=$(addprefix http://mirror.yandex.ru/centos/6.4/os/x86_64/Packages/,$(HACK_PACKAGES))
$(BUILD_DIR)/mirror/rhel/fuel.done:
mkdir -p $(LOCAL_MIRROR)/rhel/fuel/Packages
-wget -c -i $(SOURCE_DIR)/puppet/rpmcache/files/req-fuel-rhel.txt -B http://download.mirantis.com/epel-fuel-grizzly/x86_64/ -P $(LOCAL_MIRROR)/rhel/fuel/Packages
-wget -c -i $(SOURCE_DIR)/puppet/rpmcache/files/req-fuel-rhel.txt -B http://download.mirantis.com/epel-fuel-grizzly/noarch/ -P $(LOCAL_MIRROR)/rhel/fuel/Packages
-wget -c -i $(SOURCE_DIR)/puppet/rpmcache/files/req-fuel-rhel.txt -B http://srv11-msk.msk.mirantis.net/rhel6/fuel-rpms/x86_64/ -P $(LOCAL_MIRROR)/rhel/fuel/Packages
-wget -c -P $(LOCAL_MIRROR)/rhel/fuel/Packages $(HACK_URLS)
$(ACTION.TOUCH)
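wget's -i/-B pairing above treats each line of req-fuel-rhel.txt as a path resolved against the -B base URL, and the leading '-' on the recipe lines tells make to keep going if a download fails. A sketch of the same pattern with hypothetical names:

    #!/bin/bash
    # package-list.txt holds one rpm filename per line (hypothetical file).
    printf '%s\n' xinetd-2.3.14-38.el6.x86_64.rpm \
        xfsprogs-3.1.1-10.el6.x86_64.rpm > package-list.txt
    wget -c -i package-list.txt \
        -B http://mirror.example.com/packages/x86_64/ -P Packages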
$(BUILD_DIR)/mirror/rhel/repo.done: \
$(BUILD_DIR)/mirror/rhel/yum.done \
$(BUILD_DIR)/mirror/rhel/fuel.done \
| $(LOCAL_MIRROR_RHEL)/comps.xml
createrepo -g $(LOCAL_MIRROR_RHEL)/comps.xml \
-o $(LOCAL_MIRROR_RHEL)/ $(LOCAL_MIRROR_RHEL)/
$(ACTION.TOUCH)

View File

@ -1,209 +0,0 @@
#!/usr/bin/python
#
# yum-plugin-priorities 0.0.7
#
# Copyright (c) 2006-2007 Daniel de Kok
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# This plugin is inspired by the protectbase plugin, and enables/disables
# packages based on a repository priority.
#
# You can install this plugin by copying it to /usr/lib/yum-plugins. To
# enable this plugin, make sure that you have 'plugins=1' in /etc/yum.conf,
# and create the file /etc/yum/pluginconf.d/priorities.conf with the
# following content:
#
# [main]
# enabled=1
#
# If you also want the plugin to protect high-priority repositories against
# obsoletes in low-priority repositories, enable the 'check_obsoletes' bool:
#
# check_obsoletes=1
#
# By default, this plugin excludes packages from lower priority repositories
# based on the package name. If you want to exclude packages based only on the
# package name and architecture, enable the 'only_samearch' bool:
#
# only_samearch=1
#
# If you want packages to be matched by their fully qualified name,
# including architecture and package version, enable the 'full_match' bool:
#
# full_match=1
#
# If full_match is enabled then check_obsoletes will be forcibly disabled
#
# You can add priorities to repositories, by adding the line:
#
# priority=N
#
# to the repository entry, where N is an integer number. The default
# priority for repositories is 99. The repositories with the lowest
# number have the highest priority.
#
# Please report errors to Daniel de Kok <danieldk@pobox.com>
from yum.constants import *
from yum.plugins import TYPE_CORE
from yum import config
import yum
check_obsoletes = False
only_samearch = False
full_match = False
requires_api_version = '2.1'
plugin_type = (TYPE_CORE,)
def config_hook(conduit):
global check_obsoletes
global only_samearch
global full_match
# Plugin configuration
check_obsoletes = conduit.confBool('main', 'check_obsoletes', default = False)
only_samearch = conduit.confBool('main', 'only_samearch', default = False)
full_match = conduit.confBool('main', 'full_match', default = False)
if full_match:
check_obsoletes = False
# Repo priorities
if yum.__version__ >= '2.5.0':
# New style : yum >= 2.5
config.RepoConf.priority = config.IntOption(99)
else:
# Old style: register the option via conduit.registerOpt
conduit.registerOpt('priority', PLUG_OPT_INT, PLUG_OPT_WHERE_REPO, 99)
# Command-line options.
parser = conduit.getOptParser()
if parser:
if hasattr(parser, 'plugin_option_group'):
parser = parser.plugin_option_group
parser.add_option('', '--samearch-priorities', dest='samearch',
action='store_true', default = False,
help="Priority-exclude packages based on name + arch")
def _all_repo_priorities_same(allrepos):
""" Are all repos at the same priority """
first = None
for repo in allrepos:
if first is None:
first = repo.priority
elif first != repo.priority:
return False
return True
def exclude_hook(conduit):
global only_samearch
global check_obsoletes
global full_match
allrepos = conduit.getRepos().listEnabled()
# If they haven't done anything, don't do any work
if _all_repo_priorities_same(allrepos):
return
# Check whether the user specified the --samearch option.
opts, commands = conduit.getCmdLine()
if opts and opts.samearch:
only_samearch = True
cnt = 0
if check_obsoletes and not conduit._base.conf.obsoletes:
check_obsoletes = False
if check_obsoletes:
obsoletes = conduit._base.up.rawobsoletes
# Build a dictionary with package priorities. Either with arch or
# archless, based on the user's settings.
if only_samearch:
pkg_priorities = dict()
if check_obsoletes or not only_samearch:
pkg_priorities_archless = dict()
for repo in allrepos:
if repo.enabled:
if only_samearch:
repopkgs = _pkglist_to_dict(conduit.getPackages(repo), repo.priority, True)
_mergeprioritydicts(pkg_priorities, repopkgs)
if check_obsoletes or not only_samearch:
repopkgs_archless = _pkglist_to_dict(conduit.getPackages(repo), repo.priority)
_mergeprioritydicts(pkg_priorities_archless, repopkgs_archless)
# Eliminate packages that have a low priority
for repo in allrepos:
if repo.enabled:
for po in conduit.getPackages(repo):
delPackage = False
if full_match:
pname = str(po)
else:
pname = po.name
if only_samearch:
key = "%s.%s" % (pname,po.arch)
if key in pkg_priorities and pkg_priorities[key] < repo.priority:
delPackage = True
else:
key = "%s" % pname
if key in pkg_priorities_archless and pkg_priorities_archless[key] < repo.priority:
delPackage = True
if delPackage:
conduit.delPackage(po)
cnt += 1
conduit.info(3," --> %s from %s excluded (priority)" % (po,po.repoid))
# If this package obsoletes other packages, check whether any of
# the obsoleted packages is available through a repo with a higher
# priority. If so, remove this package.
if check_obsoletes:
if po.pkgtup in obsoletes:
obsolete_pkgs = obsoletes[po.pkgtup]
for obsolete_pkg in obsolete_pkgs:
pkg_name = obsolete_pkg[0]
if pkg_name in pkg_priorities_archless and pkg_priorities_archless[pkg_name] < repo.priority:
conduit.delPackage(po)
cnt += 1
conduit.info(3," --> %s from %s excluded (priority)" % (po,po.repoid))
break
if cnt:
conduit.info(2, '%d packages excluded due to repository priority protections' % cnt)
if check_obsoletes:
# Atm. the update object doesn't get updated when we manually exclude
# things ... so delete it. This needs to be re-written.
conduit._base.up = None
def _pkglist_to_dict(pl, priority, addArch = False):
global full_match
out = dict()
for p in pl:
if full_match:
pname = str(p)
else:
pname = p.name
if addArch:
key = "%s.%s" % (pname,p.arch)
out[key] = priority
else:
out[pname] = priority
return out
def _mergeprioritydicts(dict1, dict2):
for package in dict2.keys():
if package not in dict1 or dict2[package] < dict1[package]:
dict1[package] = dict2[package]

View File

@ -1,62 +0,0 @@
define rhel_yum_conf
[main]
cachedir=$(BUILD_DIR)/mirror/rhel/cache
keepcache=0
debuglevel=6
logfile=$(BUILD_DIR)/mirror/rhel/yum.log
exclude=*.i686.rpm
exactarch=1
obsoletes=1
gpgcheck=0
plugins=1
pluginpath=$(BUILD_DIR)/mirror/rhel/etc/yum-plugins
pluginconfpath=$(BUILD_DIR)/mirror/rhel/etc/yum/pluginconf.d
reposdir=$(BUILD_DIR)/mirror/rhel/etc/yum.repos.d
endef
define rhel_yum_repo_rhel
[rhel-os-30-puddle]
name=OpenStack-3.0-Puddle
baseurl=http://srv11-msk.msk.mirantis.net/rhel6/OpenStack-3.0-Puddle
gpgcheck=0
enabled=1
[rhel-server-rpms]
name=rhel-6-server-rpms
baseurl=http://srv11-msk.msk.mirantis.net/rhel6/rhel-6-server-rpms
gpgcheck=0
enabled=1
[rhel-server-optional-rpms]
name=rhel-6-server-optional-rpms
baseurl=http://srv11-msk.msk.mirantis.net/rhel6/rhel-6-server-optional-rpms
gpgcheck=0
enabled=1
[rhel-ha-rpms]
name=rhel-ha-for-rhel-6-server-rpms
baseurl=http://srv11-msk.msk.mirantis.net/rhel6/rhel-ha-for-rhel-6-server-rpms
gpgcheck=0
enabled=1
[rhel-lb-rpms]
name=rhel-lb-for-rhel-6-server-rpms
baseurl=http://srv11-msk.msk.mirantis.net/rhel6/rhel-lb-for-rhel-6-server-rpms
gpgcheck=0
enabled=1
[rhel-rs-rpms]
name=rhel-rs-for-rhel-6-server-rpms
baseurl=http://srv11-msk.msk.mirantis.net/rhel6/rhel-rs-for-rhel-6-server-rpms
gpgcheck=0
enabled=1
endef
define rhel_yum_repo_proprietary
[proprietary]
name = RHEL $(CENTOS_RELEASE) - Proprietary
baseurl = $(MIRROR_RHEL)
gpgcheck = 0
enabled = 1
priority=1
endef

View File

@ -1,22 +0,0 @@
$(addprefix $(LOCAL_MIRROR_SRC)/, $(notdir $(REQUIRED_SRCS))):
@mkdir -p $(LOCAL_MIRROR_SRC)
ifeq ($(MIRROR_SRC),internet)
wget --no-use-server-timestamps -c -P $(LOCAL_MIRROR_SRC) $(shell echo $(REQUIRED_SRCS) | grep $(notdir $@))
else
wget --no-use-server-timestamps -c -P $(LOCAL_MIRROR_SRC) $(MIRROR_SRC)/$(notdir $@)
endif
#
# Download ostf packages directly from GitHub, because they are
# updated often and we don't want to update the main mirrors
# on each commit
#
$(LOCAL_MIRROR_SRC)/$(OSTF_TESTS_SHA).zip:
wget --no-use-server-timestamps -c -P $(LOCAL_MIRROR_SRC) https://github.com/Mirantis/fuel-ostf-tests/archive/$(OSTF_TESTS_SHA).zip
$(LOCAL_MIRROR_SRC)/$(OSTF_PLUGIN_SHA).zip:
wget --no-use-server-timestamps -c -P $(LOCAL_MIRROR_SRC) https://github.com/Mirantis/fuel-ostf-plugin/archive/$(OSTF_PLUGIN_SHA).zip
$(BUILD_DIR)/mirror/src/build.done: $(SOURCE_DIR)/requirements-src.txt \
| $(addprefix $(LOCAL_MIRROR_SRC)/, $(notdir $(REQUIRED_SRCS)) $(OSTF_TESTS_SHA).zip $(OSTF_PLUGIN_SHA).zip)
$(ACTION.TOUCH)

View File

@ -1,11 +0,0 @@
ISOLINUX_FILES:=netboot.tar.gz
# debian isolinux files
$(addprefix $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/installer-amd64/current/images/netboot/,$(ISOLINUX_FILES)):
@mkdir -p $(@D)
wget -O $@ $(MIRROR_UBUNTU_OS_BASEURL)/installer-amd64/current/images/netboot/$(@F)
tar -xzf $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/installer-amd64/current/images/netboot/$(@F) -C $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/installer-amd64/current/images/netboot/
$(BUILD_DIR)/mirror/ubuntu/boot.done: \
$(addprefix $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/installer-amd64/current/images/netboot/,$(ISOLINUX_FILES))
$(ACTION.TOUCH)

View File

@ -1,13 +0,0 @@
$(BUILD_DIR)/mirror/ubuntu/createchroot.done:
mkdir -p $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot
# sudo debootstrap --components=main,universe $(UBUNTU_RELEASE) $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot $(shell echo $(MIRROR_UBUNTU) | sed 's/.dists\///g')
sudo debootstrap --components=main,universe $(UBUNTU_RELEASE) $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot http://mirror.yandex.ru/ubuntu
# echo deb $(MIRROR_FUEL_UBUNTU) $(UBUNTU_RELEASE) main | sudo tee $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot/etc/apt/sources.list.d/mirantis.list
echo deb http://download.mirantis.com/precise-grizzly-fuel-3.2 $(UBUNTU_RELEASE) main | sudo tee $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot/etc/apt/sources.list.d/mirantis.list
echo 'APT::Get::AllowUnauthenticated 1;' | sudo tee $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot/etc/apt/apt.conf.d/02mirantis-unauthenticated
sudo chroot $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot /bin/bash -c "apt-get -y install reprepro && apt-get -dy install $(shell echo $(REQUIRED_DEBS) | tr '\n' ' ')"
sudo mkdir -p $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot/repo/conf
sudo cp -a $(SOURCE_DIR)/mirror/ubuntu/mkrepo.sh $(SOURCE_DIR)/mirror/ubuntu/distributions $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot/repo/conf
sudo chroot $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot /bin/bash -c "chmod +x /repo/conf/mkrepo.sh && /repo/conf/mkrepo.sh"
sudo mv $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot/repo/* $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/ && sudo rm -rf $(LOCAL_MIRROR_UBUNTU_OS_BASEURL)/chroot/
$(ACTION.TOUCH)

View File

@ -1,7 +0,0 @@
Origin: Mirantis
Label: Mirantis
Codename: precise
Architectures: i386 amd64
Components: main restricted universe
Description: Mirantis local package mirror
UDebComponents: main

View File

@ -1,5 +0,0 @@
#!/bin/bash
cd /repo
for deb in $(find /var/cache/apt -name \*deb); do
reprepro includedeb precise $deb
done
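reprepro reads conf/distributions (the file above) relative to its working directory, and includedeb registers each package under pool/ and dists/ for the 'precise' codename. Once mkrepo.sh has run, the tree is an ordinary apt source; a usage sketch with an illustrative path:

    #!/bin/bash
    # Point apt at the generated repository and refresh the package lists.
    echo "deb file:/repo precise main" | sudo tee /etc/apt/sources.list.d/local.list
    sudo apt-get update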

View File

@ -1,8 +0,0 @@
# This module downloads ubuntu installation images.
include $(SOURCE_DIR)/mirror/ubuntu/boot.mk
include $(SOURCE_DIR)/mirror/ubuntu/createchroot.mk
$(BUILD_DIR)/mirror/ubuntu/build.done: \
$(BUILD_DIR)/mirror/ubuntu/boot.done \
$(BUILD_DIR)/mirror/ubuntu/createchroot.done
$(ACTION.TOUCH)

View File

@ -1,25 +0,0 @@
NAILGUN_VERSION:=$(shell python -c "import sys; sys.path.insert(0, '$(SOURCE_DIR)/nailgun'); import setup; print setup.version")
$(BUILD_DIR)/packages/eggs/Nailgun-$(NAILGUN_VERSION).tar.gz: $(call depv,NO_UI_OPTIMIZE) \
$(call find-files,$(SOURCE_DIR)/nailgun)
ifeq ($(NO_UI_OPTIMIZE),0)
mkdir -p $(BUILD_DIR)/packages/eggs
cp -r $(SOURCE_DIR)/nailgun $(BUILD_DIR)/packages/eggs
cd $(SOURCE_DIR)/nailgun && \
r.js -o build.js dir=$(BUILD_DIR)/packages/eggs/nailgun/static
rm -rf $(BUILD_DIR)/packages/eggs/nailgun/static/templates
rm -f $(BUILD_DIR)/packages/eggs/nailgun/static/build.txt
find $(BUILD_DIR)/packages/eggs/nailgun/static/css -type f ! -name main.css -delete
find $(BUILD_DIR)/packages/eggs/nailgun/static/js -type f ! -name main.js -and ! -name require.js -delete
cd $(BUILD_DIR)/packages/eggs/nailgun && \
python setup.py sdist --dist-dir $(BUILD_DIR)/packages/eggs
else
cd $(SOURCE_DIR)/nailgun && \
python setup.py sdist --dist-dir $(BUILD_DIR)/packages/eggs
endif
test-unit: test-unit-nailgun
.PHONY: test-unit test-unit-nailgun
test-unit-nailgun:
cd $(SOURCE_DIR)/nailgun && ./run_tests.sh

View File

@ -37,9 +37,9 @@ class FuelKeyHandler(JSONHandler):
:http: * 200 (OK)
"""
key_data = {
"sha": settings.COMMIT_SHA,
"release": settings.PRODUCT_VERSION,
"uuid": settings.FUEL_KEY
"sha": settings.VERSION['nailgun_sha'],
"release": settings.VERSION['release'],
"uuid": settings.VERSION['fuel_key']
}
signature = base64.b64encode(json.dumps(key_data))
key_data["signature"] = signature

View File

@ -33,7 +33,10 @@ class VersionHandler(JSONHandler):
:http: * 200 (OK)
"""
return {
"sha": str(settings.COMMIT_SHA),
"release": str(settings.PRODUCT_VERSION),
"fuel_sha": str(settings.FUEL_COMMIT_SHA)
"release": str(settings.VERSION['release']),
"nailgun_sha": str(settings.VERSION['nailgun_sha']),
"astute_sha": str(settings.VERSION['astute_sha']),
"fuellib_sha": str(settings.VERSION['fuellib_sha']),
"ostf_tests_sha": str(settings.VERSION['ostf_tests_sha']),
"ostf_plugin_sha": str(settings.VERSION['ostf_plugin_sha'])
}
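After this change the version endpoint reports one SHA per component instead of the old sha/fuel_sha pair. Assuming the listen address and port from the settings below, a request would look roughly like this (the /api/version path is an assumption based on the handler name):

    #!/bin/bash
    curl -s http://127.0.0.1:8000/api/version
    # Expected shape of the response (values come from settings.VERSION):
    # {"release": "3.0", "nailgun_sha": "Unknown build",
    #  "astute_sha": "Unknown build", "fuellib_sha": "Unknown build",
    #  "ostf_tests_sha": "Unknown build", "ostf_plugin_sha": "Unknown build"}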

View File

@ -2,10 +2,14 @@ LISTEN_ADDRESS: "0.0.0.0"
LISTEN_PORT: "8000"
DEVELOPMENT: 1
COMMIT_SHA: "Unknown build"
FUEL_COMMIT_SHA: "Unknown build"
PRODUCT_VERSION: "3.0"
FUEL_KEY: "Unknown"
VERSION:
release: "3.0"
fuel_key: "Unknown"
nailgun_sha: "Unknown build"
astute_sha: "Unknown build"
fuellib_sha: "Unknown build"
ostf_tests_sha: "Unknown build"
ostf_plugin_sha: "Unknown build"
FIXTURES_TO_UPLOAD:
- "openstack.json"

View File

@ -24,15 +24,21 @@ from nailgun.test.base import reverse
class TestFuelKeyHandler(BaseTestCase):
- @patch('nailgun.api.handlers.version.settings.PRODUCT_VERSION', '0.1b')
- @patch('nailgun.api.handlers.version.settings.COMMIT_SHA', '12345')
- @patch('nailgun.api.handlers.version.settings.FUEL_KEY', 'uuid')
+ @patch('nailgun.api.handlers.version.settings.VERSION', {
+ 'release': '0.1b',
+ 'nailgun_sha': '12345',
+ 'fuel_key': 'uuid'
+ })
def test_version_handler(self):
resp = self.app.get(
reverse('FuelKeyHandler'),
headers=self.default_headers
)
key_data = {"release": "0.1b", "sha": "12345", "uuid": "uuid"}
key_data = {
"sha": "12345",
"release": "0.1b",
"uuid": "uuid"
}
signature = base64.b64encode(json.dumps(key_data))
key_data["signature"] = signature

View File

@ -23,8 +23,14 @@ from nailgun.test.base import reverse
class TestVersionHandler(BaseIntegrationTest):
- @patch('nailgun.api.handlers.version.settings.PRODUCT_VERSION', '0.1b')
- @patch('nailgun.api.handlers.version.settings.COMMIT_SHA', '12345')
+ @patch('nailgun.api.handlers.version.settings.VERSION', {
+ 'release': '0.1b',
+ 'nailgun_sha': '12345',
+ "astute_sha": "Unknown build",
+ "fuellib_sha": "Unknown build",
+ "ostf_tests_sha": "Unknown build",
+ "ostf_plugin_sha": "Unknown build"
+ })
def test_version_handler(self):
resp = self.app.get(
reverse('VersionHandler'),
@ -33,5 +39,12 @@ class TestVersionHandler(BaseIntegrationTest):
self.assertEqual(200, resp.status)
self.assertEqual(
json.loads(resp.body),
{"release": "0.1b", "sha": "12345", "fuel_sha": "Unknown build"}
{
"release": "0.1b",
"nailgun_sha": "12345",
"astute_sha": "Unknown build",
"fuellib_sha": "Unknown build",
"ostf_tests_sha": "Unknown build",
"ostf_plugin_sha": "Unknown build"
}
)

Some files were not shown because too many files have changed in this diff.