Fuel-agent functional tests refactoring

Removed fuel-agent dependency from functional tests.

Change-Id: Ia58d828ecd02ee22acd4e21cf83009108ec86e1e
This commit is contained in:
Vladimir Kozhukalov 2014-09-18 17:09:38 +04:00
parent 42632710f5
commit 396ab8a7b4
26 changed files with 1220 additions and 439 deletions

View File

@ -4,7 +4,7 @@ network-interfaces: |
iface {{ common.admin_iface_name|default("eth0") }} inet static
address {{ common.admin_ip }}
# network 192.168.1.0
netmask {{ common.admin_mask }}
netmask {{ common.admin_mask }}
# broadcast 192.168.1.255
# gateway 192.168.1.254
hostname: {{ common.hostname }}

View File

@ -4,7 +4,7 @@ network-interfaces: |
iface {{ common.admin_iface_name|default("eth0") }} inet static
address {{ common.admin_ip }}
# network 192.168.1.0
netmask {{ common.admin_mask }}
netmask {{ common.admin_mask }}
# broadcast 192.168.1.255
# gateway 192.168.1.254
hostname: {{ common.hostname }}

View File

@ -30,6 +30,7 @@ class Driver(object):
'ssh_status': fabric_driver,
'ssh_put_content': fabric_driver,
'ssh_put_file': fabric_driver,
'ssh_get_file': fabric_driver,
'ssh_run': fabric_driver,
# these methods are from libvirt_driver

View File

@ -95,6 +95,10 @@ def ssh_put_file(*args, **kwargs):
raise NotImplementedError
def ssh_get_file(*args, **kwargs):
    """Fallback stub: fetching files over ssh is not supported by this driver."""
    raise NotImplementedError
def ssh_run(*args, **kwargs):
    """Fallback stub: running commands over ssh is not supported by this driver."""
    raise NotImplementedError

View File

@ -28,7 +28,7 @@ def ssh_status(ssh):
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.timeout):
timeout=ssh.connection_timeout):
try:
with fab.hide('running', 'stdout', 'stderr'):
fab.run('echo')
@ -47,7 +47,7 @@ def ssh_put_content(ssh, file_content, remote_filename):
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.timeout):
timeout=ssh.connection_timeout):
with tempfile.NamedTemporaryFile() as f:
f.write(file_content)
try:
@ -67,7 +67,7 @@ def ssh_put_file(ssh, filename, remote_filename):
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.timeout):
timeout=ssh.connection_timeout):
try:
fab.put(filename, remote_filename)
except SystemExit:
@ -78,13 +78,31 @@ def ssh_put_file(ssh, filename, remote_filename):
raise
def ssh_get_file(ssh, remote_filename, filename):
    """Fetch a file from the remote host using fabric's ``get``.

    :param ssh: ssh object carrying host, user, key_filename and
        connection_timeout settings (plus its env for the key path)
    :param remote_filename: path of the file on the remote host
    :param filename: local path where the file is stored
    :raises SystemExit: re-raised when fabric aborts the session
    :raises Exception: any other fabric error is logged and re-raised
    """
    # Use lazy %-style logger arguments so the message is only
    # formatted when the corresponding log level is enabled.
    LOG.debug('Trying to get file from remote host: '
              'local=%s remote=%s', filename, remote_filename)
    with fab.settings(
            host_string=ssh.host,
            user=ssh.user,
            key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
            timeout=ssh.connection_timeout):
        try:
            fab.get(remote_filename, filename)
        except SystemExit:
            # fabric signals a hard abort via SystemExit; propagate it.
            sys.exit()
        except Exception:
            LOG.error('Error while getting file from remote host: '
                      'local=%s remote=%s', filename, remote_filename)
            raise
def ssh_run(ssh, command, command_timeout=10):
LOG.debug('Trying to run command on remote host: %s' % command)
with fab.settings(
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.timeout,
timeout=ssh.connection_timeout,
command_timeout=command_timeout,
warn_only=True):
try:

View File

@ -430,7 +430,8 @@ def vm_start(vm, drv=None):
interfaces.append({
'type': 'network',
'source_network': vm.env.name + '_' + interface.network,
'mac_address': interface.mac
'mac_address': interface.mac,
'model_type': interface.model_type
})
LOG.debug('Defining vm %s' % vm.name)
drv.define(vmname, boot=vm.boot, disks=disks, interfaces=interfaces)

View File

@ -23,13 +23,15 @@ LOG = logging.getLogger(__name__)
class Ssh(Object):
__typename__ = 'ssh'
def __init__(self, env, name, host, key_filename, user='root', timeout=5):
def __init__(self, env, name, host, key_filename, user='root',
connection_timeout=5, command_timeout=10):
self.env = env
self.name = name
self.host = host
self.user = user
self.key_filename = key_filename
self.timeout = timeout
self.connection_timeout = int(connection_timeout)
self.command_timeout = int(command_timeout)
def status(self):
status = self.env.driver.ssh_status(self)
@ -43,6 +45,13 @@ class Ssh(Object):
else:
raise Exception('Wrong ssh status: %s' % self.name)
def get_file(self, remote_filename, filename):
    """Download remote_filename from the host into local filename.

    Delegates to the environment driver; fails fast when the ssh
    status check does not pass.
    """
    if not self.status():
        raise Exception('Wrong ssh status: %s' % self.name)
    LOG.debug('Getting file %s' % self.name)
    self.env.driver.ssh_get_file(self, remote_filename, filename)
def put_file(self, filename, remote_filename):
if self.status():
LOG.debug('Putting file %s' % self.name)
@ -50,16 +59,18 @@ class Ssh(Object):
else:
raise Exception('Wrong ssh status: %s' % self.name)
def run(self, command, command_timeout=10):
def run(self, command, command_timeout=None):
    """Run a shell command on the remote host via the environment driver.

    :param command: command line to execute remotely
    :param command_timeout: per-call timeout in seconds; when None the
        instance-wide self.command_timeout is used
    :returns: whatever the driver's ssh_run returns
    :raises Exception: when the ssh status check fails
    """
    if not self.status():
        raise Exception('Wrong ssh status: %s' % self.name)
    LOG.debug('Running command %s' % self.name)
    # Compare against None explicitly: the original
    # `command_timeout or self.command_timeout` silently replaced an
    # explicit command_timeout=0 with the default, because 0 is falsy.
    if command_timeout is None:
        command_timeout = self.command_timeout
    return self.env.driver.ssh_run(self, command, command_timeout)
def wait(self, timeout=200):
begin_time = time.time()
# this loop does not have sleep statement
# because it relies on self.timeout which is by default 5 seconds
# because it relies on self.connection_timeout
# which is by default 5 seconds
while time.time() - begin_time < timeout:
if self.status():
return True

View File

@ -62,9 +62,10 @@ class Vm(Object):
class Interface(object):
def __init__(self, mac, network):
def __init__(self, mac, network, model_type='e1000'):
self.mac = mac
self.network = network
self.model_type = model_type
class Disk(object):

View File

@ -12,8 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import jinja2
import json
import os
import sys
import time
try:
from unittest.case import TestCase
except ImportError:
@ -21,332 +25,81 @@ except ImportError:
from unittest2.case import TestCase
import yaml
from fuel_agent import manager as fa_manager
from fuel_agent_ci.objects import environment
from fuel_agent_ci import utils
FUEL_AGENT_REPO_NAME = 'fuel_agent'
FUEL_AGENT_HTTP_NAME = 'http'
FUEL_AGENT_NET_NAME = 'net'
FUEL_AGENT_DHCP_NAME = 'dhcp'
FUEL_AGENT_CI_ENVIRONMENT_FILE = 'samples/ci_environment.yaml'
SSH_COMMAND_TIMEOUT = 150
CEPH_JOURNAL = {
"partition_guid": "45b0969e-9b03-4f30-b4c6-b4b80ceff106",
"name": "cephjournal",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 0
}
CEPH_DATA = {
"partition_guid": "4fbd7e29-9d25-41b8-afd0-062c0ceff05d",
"name": "ceph",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 3333
}
# FIXME(kozhukalov) it is better to set this as command line arg
ENV_FILE = os.path.join(os.path.dirname(__file__),
'../../samples/ci_environment.yaml')
class BaseFuelAgentCITest(TestCase):
FUEL_AGENT_REPO_NAME = 'fuel_agent'
FUEL_AGENT_HTTP_NAME = 'http'
FUEL_AGENT_NET_NAME = 'net'
FUEL_AGENT_DHCP_NAME = 'dhcp'
FUEL_AGENT_SSH_NAME = 'vm'
FUEL_AGENT_TEMPLATE_PATH = '/usr/share/fuel-agent/cloud-init-templates'
def setUp(self):
super(BaseFuelAgentCITest, self).setUp()
with open(FUEL_AGENT_CI_ENVIRONMENT_FILE) as f:
# Starting environment
with open(ENV_FILE) as f:
ENV_DATA = (yaml.load(f.read()))
self.env = environment.Environment.new(**ENV_DATA)
self.env.start()
self.name = ENV_DATA['vm'][0]['name']
repo_obj = self.env.repo_by_name(FUEL_AGENT_REPO_NAME)
tgz_name = '%s.tar.gz' % repo_obj.name
utils.execute('tar czf %s %s' % (tgz_name,
os.path.join(self.env.envdir,
repo_obj.path)))
self.env.ssh_by_name(self.name).wait()
self.env.ssh_by_name(self.name).put_file(
tgz_name, os.path.join('/tmp', tgz_name))
self.env.ssh_by_name(self.name).run(
'tar xf %s' % os.path.join('/tmp', tgz_name),
command_timeout=SSH_COMMAND_TIMEOUT)
self.env.ssh_by_name(self.name).run(
'pip install setuptools --upgrade',
command_timeout=SSH_COMMAND_TIMEOUT)
self.env.ssh_by_name(self.name).run(
'cd /root/var/tmp/fuel_agent_ci/fuel_agent/fuel_agent; '
#FIXME(agordeev): ^ don't hardcode path
'python setup.py install', command_timeout=SSH_COMMAND_TIMEOUT)
self.http_obj = self.env.http_by_name(FUEL_AGENT_HTTP_NAME)
self.dhcp_hosts = self.env.dhcp_by_name(FUEL_AGENT_DHCP_NAME).hosts
self.net = self.env.net_by_name(FUEL_AGENT_NET_NAME)
p_data = get_filled_provision_data(self.dhcp_hosts[0]['ip'],
self.dhcp_hosts[0]['mac'],
self.net.ip, self.http_obj.port)
self.env.ssh_by_name(self.name).put_content(
json.dumps(p_data), os.path.join('/tmp', 'provision.json'))
self.mgr = fa_manager.Manager(p_data)
self.repo = self.env.repo_by_name(self.FUEL_AGENT_REPO_NAME)
self.ssh = self.env.ssh_by_name(self.FUEL_AGENT_SSH_NAME)
self.http = self.env.http_by_name(self.FUEL_AGENT_HTTP_NAME)
self.dhcp_hosts = self.env.dhcp_by_name(self.FUEL_AGENT_DHCP_NAME).hosts
self.net = self.env.net_by_name(self.FUEL_AGENT_NET_NAME)
self.ssh.wait()
self._upgrade_fuel_agent()
def _upgrade_fuel_agent(self):
"""This method is to be deprecated when artifact
based build system is ready.
"""
src_dir = os.path.join(self.env.envdir, self.repo.path, 'fuel_agent')
package_name = 'fuel-agent-0.1.0.tar.gz'
# Building fuel-agent pip package
utils.execute('python setup.py sdist', cwd=src_dir)
# Putting fuel-agent pip package on a node
self.ssh.put_file(
os.path.join(src_dir, 'dist', package_name),
os.path.join('/tmp', package_name))
# Installing fuel_agent pip package
self.ssh.run('pip install --upgrade %s' %
os.path.join('/tmp', package_name))
# Copying fuel_agent templates
self.ssh.run('mkdir -p %s' % self.FUEL_AGENT_TEMPLATE_PATH)
for f in os.listdir(
os.path.join(src_dir, 'cloud-init-templates')):
if f.endswith('.jinja2'):
self.ssh.put_file(
os.path.join(src_dir, 'cloud-init-templates', f),
os.path.join(self.FUEL_AGENT_TEMPLATE_PATH, f))
self.ssh.put_file(
os.path.join(src_dir, 'etc/fuel-agent/fuel-agent.conf.sample'),
'/etc/fuel-agent/fuel-agent.conf')
def tearDown(self):
super(BaseFuelAgentCITest, self).tearDown()
self.env.stop()
def get_filled_provision_data_for_ceph(ip, mac, master_ip, port=8888,
profile='ubuntu'):
data = get_filled_provision_data(ip, mac, master_ip, port, profile)
#FIXME(agordeev): expecting 3 disk devices at least
for i in range(0, 3):
data['ks_meta']['pm_data']['ks_spaces'][i]['volumes'].append(
CEPH_JOURNAL)
data['ks_meta']['pm_data']['ks_spaces'][i]['volumes'].append(CEPH_DATA)
return data
def get_filled_provision_data(ip, mac, master_ip, port=8888, profile='ubuntu'):
return {
"profile": "ubuntu_1204_x86_64",
"name_servers_search": "\"domain.tld\"",
"uid": "1",
"interfaces": {
"eth0": {
"ip_address": ip,
"dns_name": "node-1.domain.tld",
"netmask": "255.255.255.0",
"static": "0",
"mac_address": mac
}
},
"interfaces_extra": {
"eth0": {
"onboot": "yes",
"peerdns": "no"
}
},
"power_type": "ssh",
"power_user": "root",
"kernel_options": {
"udevrules": "%s_eth0" % mac,
"netcfg/choose_interface": mac
},
"power_address": "10.20.0.253",
"name_servers": "\"%s\"" % master_ip,
"ks_meta": {
"image_uri": "http://%s:%s/%s/%s.img.gz" % (master_ip, port,
profile, profile),
"image_format": "raw",
"image_container": "gzip",
"timezone": "America/Los_Angeles",
"master_ip": master_ip,
"mco_enable": 1,
"mco_vhost": "mcollective",
"mco_pskey": "unset",
"mco_user": "mcollective",
"puppet_enable": 0,
"fuel_version": "5.0.1",
"install_log_2_syslog": 1,
"mco_password": "marionette",
"puppet_auto_setup": 1,
"puppet_master": "fuel.domain.tld",
"mco_auto_setup": 1,
"auth_key": "fake_auth_key",
"pm_data": {
"kernel_params": "console=ttyS0,9600 console=tty0 rootdelay=90"
" nomodeset",
"ks_spaces": [
{
"name": "sda",
"extra": [
"disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-"
"0-0"
],
"free_space": 10001,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"mount": "/tmp",
"size": 200,
"type": "partition",
"file_system": "ext2",
"partition_guid": "0FC63DAF-8483-4772-8E79-"
"3D69D8477DE4",
"name": "TMP"
},
{
"type": "lvm_meta_pool",
"size": 0
},
{
"size": 3333,
"type": "pv",
"lvm_meta_size": 64,
"vg": "os"
},
{
"size": 800,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
},
],
"type": "disk",
"id": "sda",
"size": 10240
},
{
"name": "sdb",
"extra": [
"disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-"
"0-1"
],
"free_space": 10001,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"size": 0,
"type": "pv",
"lvm_meta_size": 0,
"vg": "os"
},
{
"size": 4444,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
},
],
"type": "disk",
"id": "sdb",
"size": 10240
},
{
"name": "sdc",
"extra": [
"disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-"
"0-2"
],
"free_space": 10001,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"size": 0,
"type": "pv",
"lvm_meta_size": 0,
"vg": "os"
},
{
"size": 1971,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
},
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:04.0-scsi-0:0:2:0",
"size": 10240
},
{
"_allocate_size": "min",
"label": "Base System",
"min_size": 2047,
"volumes": [
{
"mount": "/",
"size": 1900,
"type": "lv",
"name": "root",
"file_system": "ext4"
},
{
"mount": "swap",
"size": 43,
"type": "lv",
"name": "swap",
"file_system": "swap"
}
],
"type": "vg",
"id": "os"
},
{
"_allocate_size": "min",
"label": "Zero size volume",
"min_size": 0,
"volumes": [
{
"mount": "none",
"size": 0,
"type": "lv",
"name": "zero_size",
"file_system": "xfs"
}
],
"type": "vg",
"id": "zero_size"
},
{
"_allocate_size": "all",
"label": "Image Storage",
"min_size": 1120,
"volumes": [
{
"mount": "/var/lib/glance",
"size": 1757,
"type": "lv",
"name": "glance",
"file_system": "xfs"
}
],
"type": "vg",
"id": "image"
}
]
},
"mco_connector": "rabbitmq",
"mco_host": master_ip
},
"name": "node-1",
"hostname": "node-1.domain.tld",
"slave_name": "node-1",
"power_pass": "/root/.ssh/bootstrap.rsa",
"netboot_enabled": "1"
}
def render_template(self,
                    template_name,
                    template_dir=os.path.join(os.path.dirname(__file__),
                                              'templates'),
                    template_data=None):
    """Render the named Jinja2 template found under template_dir.

    template_data is a dict of template variables (or None for none).
    Returns the rendered text.
    """
    loader = jinja2.FileSystemLoader(template_dir)
    jinja_env = jinja2.Environment(loader=loader)
    template = jinja_env.get_template(template_name)
    context = template_data or {}
    return template.render(**context)

View File

@ -0,0 +1,100 @@
#cloud-boothook
#!/bin/bash
function add_str_to_file_if_not_exists {
    # Idempotent append: add $val to $file only when no line in the
    # file already starts with $str (optionally preceded by spaces).
    file=$1  # target file path
    str=$2   # marker string searched for at line start
    val=$3   # full line to append when the marker is absent
    if ! grep -q "^ *${str}" $file; then
        echo $val >> $file
    fi
}
cloud-init-per instance disable_selinux_on_the_fly setenforce 0
cloud-init-per instance disable_selinux sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/sysconfig/selinux
# configure udev rules
# udev persistent net
cloud-init-per instance udev_persistent_net1 service network stop
DEFAULT_GW={{ MASTER_IP }}
ADMIN_MAC={{ ADMIN_MAC }}
ADMIN_IF=$(echo {{ UDEVRULES }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1)
INSTALL_IF=$(ifconfig | grep "$ADMIN_MAC" | head -1 | cut -d' ' -f1)
NETADDR=( $(ifconfig $INSTALL_IF | grep -oP "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}") )
if [ ! -z "$(grep $ADMIN_IF /etc/sysconfig/network-scripts/ifcfg-$ADMIN_IF | grep dhcp)" ] ; then
echo -e "# FROM COBBLER SNIPPET\nDEVICE=$ADMIN_IF\nIPADDR=${NETADDR[0]}\nNETMASK=${NETADDR[2]}\nBOOTPROTO=none\nONBOOT=yes\nUSERCTL=no\n" > /etc/sysconfig/network-scripts/ifcfg-"$ADMIN_IF"
fi
cloud-init-per instance set_gateway echo GATEWAY="$DEFAULT_GW" | tee -a /etc/sysconfig/network
#Add static udev rules
cloud-init-per instance udev_persistent_net2 echo {{ UDEVRULES }} | tr ' ' '\n' | grep udevrules | tr '[:upper:]' '[:lower:]' | sed -e 's/udevrules=//g' -e 's/,/\n/g' | sed -e "s/^/SUBSYSTEM==\"net\",\ ACTION==\"add\",\ DRIVERS==\"?*\",\ ATTR{address}==\"/g" -e "s/_/\",\ ATTR{type}==\"1\",\ KERNEL==\"eth*\",\ NAME=\"/g" -e "s/$/\"/g" | tee /etc/udev/rules.d/70-persistent-net.rules
cloud-init-per instance udev_persistent_net3 udevadm control --reload-rules
cloud-init-per instance udev_persistent_net4 udevadm trigger --attr-match=subsystem=net
cloud-init-per instance udev_persistent_net5 service network start
# end of udev
# configure black module lists
# virt-what should be installed
if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then
([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :)
modprobe -r i2c_piix4
fi
cloud-init-per instance conntrack_ipv4 echo nf_conntrack_ipv4 | tee -a /etc/rc.modules
cloud-init-per instance conntrack_ipv6 echo nf_conntrack_ipv6 | tee -a /etc/rc.modules
cloud-init-per instance chmod_rc_modules chmod +x /etc/rc.modules
cloud-init-per instance conntrack_max echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf
cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4
cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6
cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576"
cloud-init-per instance mkdir_coredump mkdir -p /var/log/coredump
cloud-init-per instance set_coredump echo -e "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t" | tee -a /etc/sysctl.conf
cloud-init-per instance set_chmod chmod 777 /var/log/coredump
cloud-init-per instance set_limits echo -e "* soft core unlimited\n* hard core unlimited" | tee -a /etc/security/limits.conf
#NOTE: disabled for centos?
#cloud-init-per instance dhclient echo 'supersede routers 0;' | tee /etc/dhcp/dhclient.conf
# ntp sync
cloud-init-per instance service ntp stop | tee /dev/null
cloud-init-per instance sync_date ntpdate -t 4 -b {{ MASTER_IP }}
cloud-init-per instance sync_hwclock hwclock --systohc
cloud-init-per instance edit_ntp_conf1 sed -i '/^\s*tinker panic/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf2 sed -i '1 i tinker panic 0' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf3 echo 0 > /var/lib/ntp/drift
cloud-init-per instance edit_ntp_conf_0 chown ntp: /var/lib/ntp/drift
cloud-init-per instance edit_ntp_conf3 sed -i '/^\s*server/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf4 echo "server {{ MASTER_IP }} burst iburst" | tee -a /etc/ntp.conf
# Point installed ntpd to Master node
cloud-init-per instance set_ntpdate sed -i 's/SYNC_HWCLOCK\s*=\s*no/SYNC_HWCLOCK=yes/' /etc/sysconfig/ntpdate
cloud-init-per instance set_ntpd_0 chkconfig ntpd on
cloud-init-per instance set_ntpd_1 chkconfig ntpdate on
cloud-init-per instance removeUseDNS sed -i --follow-symlinks -e '/UseDNS/d' /etc/ssh/sshd_config
add_str_to_file_if_not_exists /etc/ssh/sshd_config 'UseDNS' 'UseDNS no'
cloud-init-per instance gssapi_disable sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
cloud-init-per instance nailgun_agent echo 'flock -w 0 -o /var/lock/agent.lock -c "/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1"' | tee /etc/rc.local
# Copying default bash settings to the root directory
cloud-init-per instance skel_bash cp -f /etc/skel/.bash* /root/
cloud-init-per instance hiera_puppet mkdir -p /etc/puppet /var/lib/hiera
cloud-init-per instance touch_puppet touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml
cloud-init-per instance clean_repos find /etc/yum.repos.d/. -name '*.repo' -delete

View File

@ -0,0 +1,74 @@
#cloud-boothook
#!/bin/bash
function add_str_to_file_if_not_exists {
    # Idempotent append: add $val to $file only when no line in the
    # file already starts with $str (optionally preceded by spaces).
    file=$1  # target file path
    str=$2   # marker string searched for at line start
    val=$3   # full line to append when the marker is absent
    if ! grep -q "^ *${str}" $file; then
        echo $val >> $file
    fi
}
# configure udev rules
# udev persistent net
cloud-init-per instance udev_persistent_net1 /etc/init.d/networking stop
ADMIN_MAC={{ ADMIN_MAC }}
ADMIN_IF=$(echo {{ UDEVRULES }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1)
INSTALL_IF=$(ifconfig | grep "$ADMIN_MAC" | head -1 | cut -d' ' -f1)
# Check if we do not already have static config (or interface seems unconfigured)
NETADDR=( $(ifconfig $INSTALL_IF | grep -oP "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}") )
if [ ! -z "$(grep $ADMIN_IF /etc/network/interfaces.d/ifcfg-$ADMIN_IF | grep dhcp)" ] ; then
echo -e "auto $ADMIN_IF\niface $ADMIN_IF inet static\n\taddress ${NETADDR[0]}\n\tnetmask ${NETADDR[2]}\n\tbroadcast ${NETADDR[1]}" > /etc/network/interfaces.d/ifcfg-"$ADMIN_IF"
fi
#Add static udev rules
cloud-init-per instance udev_persistent_net2 echo {{ UDEVRULES }} | tr ' ' '\n' | grep udevrules | tr '[:upper:]' '[:lower:]' | sed -e 's/udevrules=//g' -e 's/,/\n/g' | sed -e "s/^/SUBSYSTEM==\"net\",\ ACTION==\"add\",\ DRIVERS==\"?*\",\ ATTR{address}==\"/g" -e "s/_/\",\ ATTR{type}==\"1\",\ KERNEL==\"eth*\",\ NAME=\"/g" -e "s/$/\"/g" | tee /etc/udev/rules.d/70-persistent-net.rules
cloud-init-per instance udev_persistent_net3 udevadm control --reload-rules
cloud-init-per instance udev_persistent_net4 udevadm trigger --attr-match=subsystem=net
cloud-init-per instance udev_persistent_net5 /etc/init.d/networking start
# end of udev
# configure black module lists
# virt-what should be installed
if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then
([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :) && update-initramfs -u -k all
modprobe -r i2c_piix4
fi
cloud-init-per instance conntrack_ipv4 echo nf_conntrack_ipv4 | tee -a /etc/modules
cloud-init-per instance conntrack_ipv6 echo nf_conntrack_ipv6 | tee -a /etc/modules
cloud-init-per instance conntrack_max echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf
cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4
cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6
cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576"
cloud-init-per instance dhclient echo 'supersede routers 0;' | tee /etc/dhcp/dhclient.conf
# ntp sync
cloud-init-per instance service ntp stop | tee /dev/null
cloud-init-per instance sync_date ntpdate -t 4 -b {{ MASTER_IP }}
cloud-init-per instance sync_hwclock hwclock --systohc
cloud-init-per instance edit_ntp_conf1 sed -i '/^\s*tinker panic/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf2 sed -i '1 i tinker panic 0' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf3 echo 0 > /var/lib/ntp/drift
cloud-init-per instance edit_ntp_conf3 sed -i '/^\s*server/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf4 echo "server {{ MASTER_IP }} burst iburst" | tee -a /etc/ntp.conf
cloud-init-per instance removeUseDNS sed -i --follow-symlinks -e '/UseDNS/d' /etc/ssh/sshd_config
add_str_to_file_if_not_exists /etc/ssh/sshd_config 'UseDNS' 'UseDNS no'
cloud-init-per instance gssapi_disable sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
cloud-init-per instance nailgun_agent echo 'flock -w 0 -o /var/lock/agent.lock -c "/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1"' | tee /etc/rc.local
# Copying default bash settings to the root directory
cloud-init-per instance skel_bash cp -f /etc/skel/.bash* /root/
cloud-init-per instance hiera_puppet mkdir -p /etc/puppet /var/lib/hiera
cloud-init-per instance touch_puppet touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml

View File

@ -0,0 +1,105 @@
#cloud-config
disable_ec2_metadata: true
disable_root: false
ssh_authorized_keys:
- {{ SSH_AUTH_KEY }}
# set the locale to a given locale
# default: en_US.UTF-8
locale: en_US.UTF-8
timezone: {{ TIMEZONE }}
hostname: {{ HOSTNAME }}
fqdn: {{ FQDN }}
# TODO(kozhukalov) name_servers is set as "1.2.3.4,1.2.3.5"
resolv_conf:
nameservers: [ {{ NAME_SERVERS }} ]
searchdomains:
- {{ SEARCH_DOMAIN }}
# domain: {{ DOMAIN }}
# options:
# rotate: true
# timeout: 1
# add entries to rsyslog configuration
rsyslog:
- filename: 10-log2master.conf
content: |
$template LogToMaster, "<%%PRI%>1 %$NOW%T%TIMESTAMP:8:$%Z %HOSTNAME% %APP-NAME% %PROCID% %MSGID% -%msg%\n"
*.* @{{ MASTER_IP }};LogToMaster
#TODO(agordeev):
#mounts: fill /etc/fstab
runcmd:
- sed -i /etc/rc.d/init.d/mcollective -e 's/\(# chkconfig:\s\+[-0-6]\+\) [0-9]\+ \([0-9]\+\)/\1 81 \2/'
- /sbin/chkconfig mcollective on
# that module's missing in 0.6.3, but existent for >= 0.7.3
write_files:
- content: |
---
url: {{ MASTER_URL }}
path: /etc/nailgun-agent/config.yaml
- content: target
path: /etc/nailgun_systemtype
yum_repos:
{% for repo in KS_REPOS %}
{{ repo.name }}:
baseurl: {{ repo.url }}
enabled: true
gpgcheck: false
{% endfor %}
mcollective:
conf:
main_collective: mcollective
collectives: mcollective
libdir: /usr/share/mcollective/plugins
logfile: /var/log/mcollective.log
loglevel: debug
daemonize: 1
#NOTE: direct_addressing is 1 for ubuntu
direct_addressing: 1
ttl: 4294957
securityprovider: psk
plugin.psk: {{ MCO_PSKEY }}
connector: {{ MCO_CONNECTOR }}
plugin.rabbitmq.vhost: {{ MCO_VHOST }}
plugin.rabbitmq.pool.size: 1
plugin.rabbitmq.pool.1.host: {{ MCO_HOST }}
plugin.rabbitmq.pool.1.port: {{ MCO_PORT|default(61613) }}
plugin.rabbitmq.pool.1.user: {{ MCO_USER }}
plugin.rabbitmq.pool.1.password: {{ MCO_PASSWORD }}
plugin.rabbitmq.heartbeat_interval: 30
factsource: yaml
plugin.yaml: /etc/mcollective/facts.yaml
puppet:
conf:
main:
logdir: /var/log/puppet
rundir: /var/run/puppet
ssldir: $vardir/ssl
pluginsync: true
agent:
classfile: $vardir/classes.txt
localconfig: $vardir/localconfig
server: {{ PUPPET_MASTER }}
report: false
configtimeout: 600
final_message: "YAY! The system is finally up, after $UPTIME seconds"

View File

@ -0,0 +1,94 @@
#cloud-config
disable_ec2_metadata: true
disable_root: false
ssh_authorized_keys:
- {{ SSH_AUTH_KEY }}
# set the locale to a given locale
# default: en_US.UTF-8
locale: en_US.UTF-8
timezone: {{ TIMEZONE }}
hostname: {{ HOSTNAME }}
fqdn: {{ FQDN }}
# TODO(kozhukalov) name_servers is set as "1.2.3.4,1.2.3.5"
resolv_conf:
nameservers: [ {{ NAME_SERVERS }} ]
searchdomains:
- {{ SEARCH_DOMAIN }}
# domain: {{ DOMAIN }}
# options:
# rotate: true
# timeout: 1
# add entries to rsyslog configuration
rsyslog:
- filename: 10-log2master.conf
content: |
$template LogToMaster, "<%%PRI%>1 %$NOW%T%TIMESTAMP:8:$%Z %HOSTNAME% %APP-NAME% %PROCID% %MSGID% -%msg%\n"
*.* @{{ MASTER_IP }};LogToMaster
#TODO(agordeev):
#mounts: fill /etc/fstab
# that module's missing in 0.6.3, but existent for >= 0.7.3
write_files:
- content: |
---
url: {{ MASTER_URL }}
path: /etc/nailgun-agent/config.yaml
- content: target
path: /etc/nailgun_systemtype
- content: APT::Get::AllowUnauthenticated 1;
path: /etc/apt/apt.conf.d/02mirantis-allow-unsigned
apt_sources:
- source: deb http://{{ MASTER_IP }}:8080/ubuntu/fuelweb/x86_64 precise main
mcollective:
conf:
main_collective: mcollective
collectives: mcollective
libdir: /usr/share/mcollective/plugins
logfile: /var/log/mcollective.log
loglevel: debug
daemonize: 1
direct_addressing: 0
ttl: 4294957
securityprovider: psk
plugin.psk: {{ MCO_PSKEY }}
connector: {{ MCO_CONNECTOR }}
plugin.rabbitmq.vhost: {{ MCO_VHOST }}
plugin.rabbitmq.pool.size: 1
plugin.rabbitmq.pool.1.host: {{ MCO_HOST }}
plugin.rabbitmq.pool.1.port: {{ MCO_PORT|default(61613) }}
plugin.rabbitmq.pool.1.user: {{ MCO_USER }}
plugin.rabbitmq.pool.1.password: {{ MCO_PASSWORD }}
plugin.rabbitmq.heartbeat_interval: 30
factsource: yaml
plugin.yaml: /etc/mcollective/facts.yaml
puppet:
conf:
main:
logdir: /var/log/puppet
rundir: /var/run/puppet
ssldir: $vardir/ssl
pluginsync: true
agent:
classfile: $vardir/classes.txt
localconfig: $vardir/localconfig
server: {{ PUPPET_MASTER }}
report: false
configtimeout: 600
final_message: "YAY! The system is finally up, after $UPTIME seconds"

View File

@ -0,0 +1,10 @@
# instance-id will be autogenerated
# instance-id: iid-abcdefg
network-interfaces: |
iface {{ ADMIN_IFACE_NAME|default("eth0") }} inet static
address {{ ADMIN_IP }}
# network 192.168.1.0
netmask {{ ADMIN_MASK }}
# broadcast 192.168.1.255
# gateway 192.168.1.254
hostname: {{ HOSTNAME }}

View File

@ -0,0 +1,10 @@
# instance-id will be autogenerated
# instance-id: iid-abcdefg
network-interfaces: |
iface {{ ADMIN_IFACE_NAME|default("eth0") }} inet static
address {{ ADMIN_IP }}
# network 192.168.1.0
netmask {{ ADMIN_MASK }}
# broadcast 192.168.1.255
# gateway 192.168.1.254
hostname: {{ HOSTNAME }}

View File

@ -0,0 +1,249 @@
{
"profile": "{{PROFILE}}",
"hostname": "node-1.domain.tld",
"name_servers_search": "\"domain.tld\"",
"uid": "1",
"interfaces": {
"eth0": {
"dns_name": "node-1.domain.tld",
"netmask": "255.255.255.0",
"mac_address": "{{MAC}}",
"ip_address": "{{IP}}",
"static": "0"
}
},
"interfaces_extra": {
"eth0": {
"onboot": "yes",
"peerdns": "no"
}
},
"power_type": "ssh",
"power_user": "root",
"kernel_options": {
"udevrules": "{{MAC}}_eth0",
"netcfg/choose_interface": "{{MAC}}"
},
"name": "node-1",
"power_address": "{{IP}}",
"name_servers": "\"{{MASTER_IP}}\"",
"slave_name": "node-1",
"netboot_enabled": "1",
"power_pass": "/root/.ssh/bootstrap.rsa",
"ks_meta": {
"mco_user": "mcollective",
"mco_enable": 1,
"mco_vhost": "mcollective",
"mco_password": "marionette",
"auth_key": "fake_auth_key",
"puppet_enable": 0,
"image_container": "gzip",
"puppet_auto_setup": 1,
"mco_pskey": "unset",
"master_ip": "{{MASTER_IP}}",
{% block image_data %}
"image_data": {
"/": {
"uri": "http://{{MASTER_IP}}:{{MASTER_HTTP_PORT}}/{{PROFILE}}.img.gz",
"format": "ext4",
"container": "gzip"
}
},
{% endblock image_data %}
"mco_connector": "rabbitmq",
"pm_data": {
"kernel_params": "console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset",
{% block ks_spaces %}
"ks_spaces": [
{
"name": "sda",
"extra": [
"disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0"
],
"free_space": 10001,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"file_system": "ext2",
"name": "Boot",
"size": 200
},
{
"partition_guid": "0FC63DAF-8483-4772-8E79-3D69D8477DE4",
"name": "TMP",
"mount": "/tmp",
"type": "partition",
"file_system": "ext2",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 0
},
{
"vg": "os",
"type": "pv",
"lvm_meta_size": 64,
"size": 3333
},
{
"vg": "image",
"type": "pv",
"lvm_meta_size": 64,
"size": 800
}
],
"type": "disk",
"id": "sda",
"size": 10240
},
{
"name": "sdb",
"extra": [
"disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-1"
],
"free_space": 10001,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"file_system": "ext2",
"name": "Boot",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"vg": "os",
"type": "pv",
"lvm_meta_size": 0,
"size": 0
},
{
"vg": "image",
"type": "pv",
"lvm_meta_size": 64,
"size": 4444
}
],
"type": "disk",
"id": "sdb",
"size": 10240
},
{
"name": "sdc",
"extra": [
"disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-2"
],
"free_space": 10001,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"file_system": "ext2",
"name": "Boot",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"vg": "os",
"type": "pv",
"lvm_meta_size": 0,
"size": 0
},
{
"vg": "image",
"type": "pv",
"lvm_meta_size": 64,
"size": 1971
}
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:04.0-scsi-0:0:2:0",
"size": 10240
},
{
"_allocate_size": "min",
"label": "Base System",
"min_size": 2047,
"volumes": [
{
"mount": "/",
"type": "lv",
"name": "root",
"file_system": "ext4",
"size": 1900
},
{
"mount": "swap",
"type": "lv",
"name": "swap",
"file_system": "swap",
"size": 43
}
],
"type": "vg",
"id": "os"
},
{
"_allocate_size": "min",
"label": "Zero size volume",
"min_size": 0,
"volumes": [
{
"mount": "none",
"type": "lv",
"name": "zero_size",
"file_system": "xfs",
"size": 0
}
],
"type": "vg",
"id": "zero_size"
},
{
"_allocate_size": "all",
"label": "Image Storage",
"min_size": 1120,
"volumes": [
{
"mount": "/var/lib/glance",
"type": "lv",
"name": "glance",
"file_system": "xfs",
"size": 1757
}
],
"type": "vg",
"id": "image"
}
]
{% endblock ks_spaces %}
},
"fuel_version": "5.0.1",
"install_log_2_syslog": 1,
"timezone": "America/Los_Angeles",
"image_format": "raw",
"mco_host": "{{MASTER_IP}}",
"puppet_master": "fuel.domain.tld",
"mco_auto_setup": 1
}
}

View File

@ -0,0 +1 @@
{% extends 'provision.json' %}

View File

@ -0,0 +1,239 @@
{% extends 'provision.json' %}
{% block ks_spaces %}
"ks_spaces": [
{
"name": "sda",
"extra": [
"disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0"
],
"free_space": 10001,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"file_system": "ext2",
"name": "Boot",
"size": 200
},
{
"partition_guid": "0FC63DAF-8483-4772-8E79-3D69D8477DE4",
"name": "TMP",
"mount": "/tmp",
"type": "partition",
"file_system": "ext2",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 0
},
{
"vg": "os",
"type": "pv",
"lvm_meta_size": 64,
"size": 3333
},
{
"vg": "image",
"type": "pv",
"lvm_meta_size": 64,
"size": 800
},
{
"partition_guid": "45b0969e-9b03-4f30-b4c6-b4b80ceff106",
"name": "cephjournal",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 0
},
{
"partition_guid": "4fbd7e29-9d25-41b8-afd0-062c0ceff05d",
"name": "ceph",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 3333
}
],
"type": "disk",
"id": "sda",
"size": 10240
},
{
"name": "sdb",
"extra": [
"disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-1"
],
"free_space": 10001,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"file_system": "ext2",
"name": "Boot",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"vg": "os",
"type": "pv",
"lvm_meta_size": 0,
"size": 0
},
{
"vg": "image",
"type": "pv",
"lvm_meta_size": 64,
"size": 4444
},
{
"partition_guid": "45b0969e-9b03-4f30-b4c6-b4b80ceff106",
"name": "cephjournal",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 0
},
{
"partition_guid": "4fbd7e29-9d25-41b8-afd0-062c0ceff05d",
"name": "ceph",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 3333
}
],
"type": "disk",
"id": "sdb",
"size": 10240
},
{
"name": "sdc",
"extra": [
"disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-2"
],
"free_space": 10001,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"file_system": "ext2",
"name": "Boot",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"vg": "os",
"type": "pv",
"lvm_meta_size": 0,
"size": 0
},
{
"vg": "image",
"type": "pv",
"lvm_meta_size": 64,
"size": 1971
},
{
"partition_guid": "45b0969e-9b03-4f30-b4c6-b4b80ceff106",
"name": "cephjournal",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 0
},
{
"partition_guid": "4fbd7e29-9d25-41b8-afd0-062c0ceff05d",
"name": "ceph",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 3333
}
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:04.0-scsi-0:0:2:0",
"size": 10240
},
{
"_allocate_size": "min",
"label": "Base System",
"min_size": 2047,
"volumes": [
{
"mount": "/",
"type": "lv",
"name": "root",
"file_system": "ext4",
"size": 1900
},
{
"mount": "swap",
"type": "lv",
"name": "swap",
"file_system": "swap",
"size": 43
}
],
"type": "vg",
"id": "os"
},
{
"_allocate_size": "min",
"label": "Zero size volume",
"min_size": 0,
"volumes": [
{
"mount": "none",
"type": "lv",
"name": "zero_size",
"file_system": "xfs",
"size": 0
}
],
"type": "vg",
"id": "zero_size"
},
{
"_allocate_size": "all",
"label": "Image Storage",
"min_size": 1120,
"volumes": [
{
"mount": "/var/lib/glance",
"type": "lv",
"name": "glance",
"file_system": "xfs",
"size": 1757
}
],
"type": "vg",
"id": "image"
}
]
{% endblock ks_spaces %}

View File

@ -14,86 +14,148 @@
import hashlib
import json
import os
import mock
import time
#TODO(agordeev): remove fuel_agent requirement when configdrive artefact
# will become ready
from fuel_agent.utils import hardware_utils as hu
from fuel_agent_ci.tests import base
TEMPLATE_PATH = '/usr/share/fuel-agent/cloud-init-templates'
CONFIG_DRIVE_PATH = '/tmp/config-drive.img'
TMP_PATH = '/tmp'
from fuel_agent_ci import utils
class TestConfigDrive(base.BaseFuelAgentCITest):
def setUp(self):
super(TestConfigDrive, self).setUp()
#FIXME(agordeev): fuel_agent package isn't ready,
# so copying templates to TEMPLATE_PATH dir
self.env.ssh_by_name(self.name).run(
'mkdir -p %s' % TEMPLATE_PATH)
self.env.ssh_by_name(self.name).run(
"find /root/var/tmp/fuel_agent_ci/fuel_agent -name '*jinja2' -exec"
" cp '{}' %s \;" % TEMPLATE_PATH)
self.env.ssh_by_name(self.name).run(
'configdrive', command_timeout=base.SSH_COMMAND_TIMEOUT)
@mock.patch.object(hu, 'list_block_devices')
def test_config_driver(self, mock_lbd):
def _build_configdrive(self, profile):
data = json.loads(self.render_template(
template_name='provision.json',
template_data={
'IP': self.dhcp_hosts[0]['ip'],
'MAC': self.dhcp_hosts[0]['mac'],
'MASTER_IP': self.net.ip,
'MASTER_HTTP_PORT': self.http.port,
'PROFILE': profile,
}
))
self.ssh.put_content(json.dumps(data), '/tmp/provision.json')
admin_interface = filter(
lambda x: (x['mac_address'] ==
data['kernel_options']['netcfg/choose_interface']),
[dict(name=name, **spec) for name, spec
in data['interfaces'].iteritems()])[0]
with open('/tmp/boothook.txt', 'wb') as f:
f.write(self.render_template(
template_name='boothook_%s.jinja2' % profile.split('_')[0],
template_data={
'MASTER_IP': data['ks_meta']['master_ip'],
'ADMIN_MAC': \
data['kernel_options']['netcfg/choose_interface'],
'UDEVRULES': data['kernel_options']['udevrules']
}
))
with open('/tmp/cloud_config.txt', 'wb') as f:
f.write(self.render_template(
template_name='cloud_config_%s.jinja2' % profile.split('_')[0],
template_data={
'SSH_AUTH_KEY': data['ks_meta']['auth_key'],
'TIMEZONE': data['ks_meta']['timezone'],
'HOSTNAME': data['hostname'],
'FQDN': data['hostname'],
'NAME_SERVERS': data['name_servers'],
'SEARCH_DOMAIN': data['name_servers_search'],
'MASTER_IP': data['ks_meta']['master_ip'],
'MASTER_URL': \
'http://%s:8000/api' % data['ks_meta']['master_ip'],
# FIXME(kozhukalov):
# 'KS_REPOS': IS NOT SET YET,
'MCO_PSKEY': data['ks_meta']['mco_pskey'],
'MCO_CONNECTOR': data['ks_meta']['mco_connector'],
'MCO_VHOST': data['ks_meta']['mco_vhost'],
'MCO_HOST': data['ks_meta']['mco_host'],
# 'MCO_PORT': IS NOT SET, DEFAULT IS USED
'MCO_USER': data['ks_meta']['mco_user'],
'MCO_PASSWORD': data['ks_meta']['mco_password'],
'PUPPET_MASTER': data['ks_meta']['puppet_master']
}
))
with open('/tmp/meta-data', 'wb') as f:
f.write(self.render_template(
template_name='meta-data_%s.jinja2' % profile.split('_')[0],
template_data={
'ADMIN_IFACE_NAME': admin_interface['name'],
'ADMIN_IP': admin_interface['ip_address'],
'ADMIN_MASK': admin_interface['netmask'],
'HOSTNAME': data['hostname']
}
))
# write-mime-multipart is provided by cloud-utils package
utils.execute('write-mime-multipart --output=/tmp/user-data '
'/tmp/boothook.txt:text/cloud-boothook '
'/tmp/cloud_config.txt:text/cloud-config')
# That does not make sense to build config-drive.img as we can not
# use it as a reference for comparing md5 sum.
# The reason for that is that write-mime-multipart generates
# random boundary identifier in the beginning of user-data.
def _test_configdrive(self, profile):
def _get_md5sum(file_path, size=-1):
md5 = None
with open(file_path) as f:
md5 = hashlib.md5(f.read(size)).hexdigest()
return md5
hu_lbd = self.env.ssh_by_name(self.name).run(
'python -c "from fuel_agent.utils import hardware_utils as hu;'
'import json; print json.dumps(hu.list_block_devices())"',
command_timeout=base.SSH_COMMAND_TIMEOUT)
mock_lbd.return_value = json.loads(hu_lbd)
#TODO(agordeev): replace with prebuild `configdrive` artifact
self.mgr.do_parsing()
self.mgr.do_configdrive()
cd_size = os.path.getsize(CONFIG_DRIVE_PATH)
cd_md5 = _get_md5sum(CONFIG_DRIVE_PATH)
#NOTE(agordeev): assuming that configdrive was added lastly to images
fs_type = self.env.ssh_by_name(self.name).run(
'blkid -o value -s TYPE %s' % CONFIG_DRIVE_PATH)
self.assertEqual('iso9660', fs_type)
label_output = self.env.ssh_by_name(self.name).run(
'blkid -o value -s LABEL %s' % CONFIG_DRIVE_PATH)
self.assertEqual('cidata', label_output)
#TODO(agordeev): add test which checks deployed configdrive image
actual_md5 = _get_md5sum(CONFIG_DRIVE_PATH, size=cd_size)
self.assertEqual(cd_md5, actual_md5)
ud_output_path = os.path.join(TMP_PATH, 'user-data')
md_output_path = os.path.join(TMP_PATH, 'meta-data')
# create mount point for checking the configdrive's content
self.env.ssh_by_name(self.name).run('mkdir - p /tmp/cfgdrv')
self.env.ssh_by_name(self.name).run(
'mount -o loop %s /tmp/cfgdrv' % CONFIG_DRIVE_PATH)
self._build_configdrive(profile)
self.ssh.run('configdrive')
self.ssh.get_file('/tmp/config-drive.img',
'/tmp/actual-config-drive.img')
# checking configdrive file system type
fs_type = utils.execute(
'blkid -o value -s TYPE /tmp/actual-config-drive.img')
self.assertEqual('iso9660', str(fs_type).strip())
# checking configdrive label
label_output = utils.execute(
'blkid -o value -s LABEL /tmp/actual-config-drive.img')
self.assertEqual('cidata', str(label_output).strip())
# mounting configdrive to check its content
utils.execute('mkdir -p /tmp/cfgdrv')
utils.execute('sudo mount -o loop '
'/tmp/actual-config-drive.img /tmp/cfgdrv')
#NOTE(agordeev): mime boundary should be the same in both files,
# since boundary is always randomly generated,
# thus magic prevents from checksum differencies
expected_boundary = None
with open(ud_output_path) as f:
with open('/tmp/user-data') as f:
expected_boundary = f.read().split('\n')[0].split('"')[1]
actual_boundary = self.env.ssh_by_name(self.name).run(
'head -n1 %s' % ud_output_path).split('"')[1]
md5sum_userdata_output = self.env.ssh_by_name(self.name).run(
'sed -e s/%s/%s/ %s | md5sum' % (actual_boundary,
expected_boundary,
'/tmp/cfgdrv/user-data'))
actual_boundary = str(utils.execute(
'head -n1 /tmp/cfgdrv/user-data')).split('"')[1]
actual_md5_userdata = str(utils.execute(
'sed -e s/%s/%s/ %s | md5sum' %
(actual_boundary, expected_boundary,
'/tmp/cfgdrv/user-data'))).split()[0]
actual_md5_metadata = str(utils.execute(
'md5sum /tmp/cfgdrv/meta-data')).split()[0]
md5sum_metadata_output = self.env.ssh_by_name(self.name).run(
'md5sum /tmp/cfgdrv/meta-data')
actual_md5_userdata = md5sum_userdata_output.split()[0]
actual_md5_metadata = md5sum_metadata_output.split()[0]
expected_md5_userdata = _get_md5sum(ud_output_path)
expected_md5_metadata = _get_md5sum(md_output_path)
self.assertEqual(expected_md5_userdata, actual_md5_userdata)
self.assertEqual(expected_md5_metadata, actual_md5_metadata)
# getting reference md5 for user-data and meta-data
md5_userdata = _get_md5sum('/tmp/user-data')
md5_metadata = _get_md5sum('/tmp/meta-data')
self.assertEqual(md5_userdata, actual_md5_userdata)
self.assertEqual(md5_metadata, actual_md5_metadata)
def test_configdrive_centos(self):
    """Build and verify a config drive for the CentOS 6.5 x86_64 profile."""
    self._test_configdrive('centos_65_x86_64')
def test_configdrive_ubuntu(self):
    """Build and verify a config drive for the Ubuntu 12.04 x86_64 profile."""
    self._test_configdrive('ubuntu_1204_x86_64')
def tearDown(self):
    """Clean up config-drive artifacts created on the local host.

    The image must be unmounted before its backing file and the
    rendered template inputs are removed.
    """
    # -f: force the umount even if the loop mount is busy/stale so a
    # failed test does not leave /tmp/cfgdrv mounted for the next run.
    utils.execute('sudo umount -f /tmp/cfgdrv')
    # Remove the fetched image and every locally rendered input file.
    utils.execute('rm /tmp/actual-config-drive.img '
                  '/tmp/user-data /tmp/meta-data '
                  '/tmp/cloud_config.txt /tmp/boothook.txt')
    super(TestConfigDrive, self).tearDown()

View File

@ -14,46 +14,48 @@
import json
import os
import time
from fuel_agent_ci.tests import base
from fuel_agent_ci import utils
TARGET_DEVICE = '/dev/mapper/os-root'
class TestCopyImage(base.BaseFuelAgentCITest):
def _test_copyimage(self, profile):
#NOTE(agordeev): update provision.json with proper image specs
p_data = base.get_filled_provision_data(self.dhcp_hosts[0]['ip'],
self.dhcp_hosts[0]['mac'],
self.net.ip,
self.http_obj.port, profile)
self.env.ssh_by_name(self.name).put_content(
json.dumps(p_data), os.path.join('/tmp', 'provision.json'))
provision_data = json.loads(self.render_template(
template_data={
'IP': self.dhcp_hosts[0]['ip'],
'MAC': self.dhcp_hosts[0]['mac'],
'MASTER_IP': self.net.ip,
'MASTER_HTTP_PORT': self.http.port,
'PROFILE': profile
},
template_name='provision.json'
))
self.ssh.put_content(json.dumps(provision_data), '/tmp/provision.json')
#NOTE(agordeev): disks should be partitioned before applying the image
self.env.ssh_by_name(self.name).run(
'partition', command_timeout=base.SSH_COMMAND_TIMEOUT)
self.env.ssh_by_name(self.name).run(
'copyimage', command_timeout=base.SSH_COMMAND_TIMEOUT)
self.ssh.run('partition')
self.ssh.run('copyimage')
#NOTE(agordeev): size and checksum needed for checking deployed image
local_img_path = os.path.join(self.env.envdir, self.http_obj.http_root,
profile, profile + '.img.gz')
md5sum_output = utils.execute(
'gunzip -cd %s | md5sum' % local_img_path)
img_size_output = utils.execute('gzip -ql %s' % local_img_path)
img_size = int(img_size_output[1].split()[1]) / 2 ** 20
expected_md5 = md5sum_output[1].split()[0]
local_img_path = os.path.join(
self.env.envdir, self.http.http_root, profile + '.img.gz')
expected_md5 = str(utils.execute(
'gunzip -cd %s | md5sum' % local_img_path)).split()[0]
img_size = int(str(utils.execute(
'gzip -ql %s' % local_img_path)).split()[1]) / 2 ** 20
#NOTE(agordeev): the partition can be bigger than actual size of image
# so calculating checksum of rewritten partition part
# assuming that image has size in MB w/o fractional part
md5sum_metadata_output = self.env.ssh_by_name(self.name).run(
'dd if=%s bs=1M count=%s | md5sum' % (TARGET_DEVICE, img_size),
command_timeout=base.SSH_COMMAND_TIMEOUT)
actual_md5 = md5sum_metadata_output.split()[0]
actual_md5 = self.ssh.run(
'dd if=%s bs=1M count=%s | md5sum' %
('/dev/mapper/os-root', img_size)).split()[0]
self.assertEqual(expected_md5, actual_md5)
def test_copyimage_centos(self):
self._test_copyimage('centos')
self._test_copyimage('centos_65_x86_64')
def test_copyimage_ubuntu(self):
self._test_copyimage('ubuntu')
self._test_copyimage('ubuntu_1204_x86_64')

View File

@ -90,21 +90,21 @@ class TestPartition(base.BaseFuelAgentCITest):
_split_strip_to_lines(actual))
def _test_partitioning(self, canned_parted_info):
self.env.ssh_by_name(self.name).run(
'partition', command_timeout=base.SSH_COMMAND_TIMEOUT)
self.ssh.run('partition')
#FIXME(agordeev): mdadm resyncing time
time.sleep(10)
for disk_name, expected_parted_info in canned_parted_info.items():
actual_parted_info = self.env.ssh_by_name(self.name).run(
actual_parted_info = self.ssh.run(
'parted -s /dev/%s -m unit MiB print free' % disk_name)
self.compare_output(expected_parted_info, actual_parted_info)
actual_guid = self.env.ssh_by_name(self.name).run(
actual_guid = self.ssh.run(
'sgdisk -i 4 /dev/sda').split('\n')[0].split()[3]
self.assertEqual("0FC63DAF-8483-4772-8E79-3D69D8477DE4", actual_guid)
actual_md_output = self.env.ssh_by_name(self.name).run(
actual_md_output = self.ssh.run(
'mdadm --detail %s' % '/dev/md0')
#NOTE(agordeev): filter out lines with time stamps and UUID
@ -141,14 +141,14 @@ class TestPartition(base.BaseFuelAgentCITest):
/dev/sda6;image;668.00m;800.00m
/dev/sdb4;image;4312.00m;4444.00m
/dev/sdc4;image;1840.00m;1971.00m"""
pvdisplay_actual_output = self.env.ssh_by_name(self.name).run(
pvdisplay_actual_output = self.ssh.run(
'pvdisplay -C --noheading --units m --options '
'pv_name,vg_name,pv_size,dev_size --separator ";"')
self.compare_output(pvdisplay_expected_output, pvdisplay_actual_output)
vgdisplay_expected_output = """image;6820.00m;5060.00m
os;3204.00m;1260.00m"""
vgdisplay_actual_output = self.env.ssh_by_name(self.name).run(
vgdisplay_actual_output = self.ssh.run(
'vgdisplay -C --noheading --units m --options '
'vg_name,vg_size,vg_free --separator ";"')
self.compare_output(vgdisplay_expected_output, vgdisplay_actual_output)
@ -156,7 +156,7 @@ class TestPartition(base.BaseFuelAgentCITest):
lvdisplay_expected_output = """glance;1760.00m;image
root;1900.00m;os
swap;44.00m;os"""
lvdisplay_actual_output = self.env.ssh_by_name(self.name).run(
lvdisplay_actual_output = self.ssh.run(
'lvdisplay -C --noheading --units m --options '
'lv_name,lv_size,vg_name --separator ";"')
self.compare_output(lvdisplay_expected_output, lvdisplay_actual_output)
@ -167,29 +167,47 @@ class TestPartition(base.BaseFuelAgentCITest):
('/dev/mapper/os-swap', 'swap', ''),
('/dev/mapper/image-glance', 'xfs', '')]
for device, fs_type, label in expected_fs_data:
fs_type_output = self.env.ssh_by_name(self.name).run(
fs_type_output = self.ssh.run(
'blkid -o value -s TYPE %s' % device)
self.assertEqual(fs_type, fs_type_output)
label_output = self.env.ssh_by_name(self.name).run(
label_output = self.ssh.run(
'blkid -o value -s LABEL %s' % device)
self.assertEqual(label, label_output)
#TODO(agordeev): check fs options and mount point
def test_do_partitioning_gpt(self):
    """Check GPT partitioning against the reference parted output.

    Renders the default (non-ceph) provision.json template, uploads it
    to the node under test, then compares the resulting layout with
    REGULAR_PARTED_INFO via _test_partitioning.
    """
    # Fill the template with the first DHCP host's addressing info and
    # the master node's HTTP endpoint.
    provision_data = self.render_template(
        template_data={
            'IP': self.dhcp_hosts[0]['ip'],
            'MAC': self.dhcp_hosts[0]['mac'],
            'MASTER_IP': self.net.ip,
            'MASTER_HTTP_PORT': self.http.port,
            'PROFILE': 'ubuntu_1204_x86_64'
        },
        template_name='provision.json'
    )
    # NOTE(review): presumably the node-side agent reads its
    # provisioning schema from /tmp/provision.json — confirm in base.
    self.ssh.put_content(provision_data, '/tmp/provision.json')
    self._test_partitioning(REGULAR_PARTED_INFO)
def test_do_ceph_partitioning(self):
p_data = base.get_filled_provision_data_for_ceph(
self.dhcp_hosts[0]['ip'], self.dhcp_hosts[0]['mac'], self.net.ip,
self.http_obj.port)
self.env.ssh_by_name(self.name).put_content(
json.dumps(p_data), os.path.join('/tmp', 'provision.json'))
provision_data = self.render_template(
template_data={
'IP': self.dhcp_hosts[0]['ip'],
'MAC': self.dhcp_hosts[0]['mac'],
'MASTER_IP': self.net.ip,
'MASTER_HTTP_PORT': self.http.port,
'PROFILE': 'ubuntu_1204_x86_64'
},
template_name='provision_ceph.json'
)
self.ssh.put_content(provision_data, '/tmp/provision.json')
self._test_partitioning(CEPH_PARTED_INFO)
#NOTE(agordeev): checking if GUIDs are correct for ceph partitions
# NOTE(agordeev): checking if GUIDs are correct for ceph partitions
ceph_partitions = {'sda': 7, 'sdb': 5, 'sdc': 5}
for disk_name, partition_num in ceph_partitions.items():
actual_guid = self.env.ssh_by_name(self.name).run(
actual_guid = self.ssh.run(
'sgdisk -i %s /dev/%s' % (partition_num, disk_name)).\
split('\n')[0].split()[3]
self.assertEqual(base.CEPH_DATA['partition_guid'].upper(),
self.assertEqual('4fbd7e29-9d25-41b8-afd0-062c0ceff05d'.upper(),
actual_guid)
# FIXME(kozhukalov): check if ceph journals are created and their GUIDs

View File

@ -34,11 +34,22 @@ def genmac(start=None):
return mac
class ExecResult(object):
    """Outcome of an executed shell command.

    Holds the captured output streams and the process exit status;
    converting the object to a string yields the captured stdout, which
    lets callers use the result directly in string operations.
    """

    def __init__(self, stdout, stderr='', return_code=0):
        # Plain attributes: captured streams plus the exit status.
        self.stdout = stdout
        self.stderr = stderr
        self.return_code = return_code

    def __str__(self):
        # str(result) is the command's standard output, unmodified.
        return self.stdout

    # repr() intentionally mirrors str(): both render as raw stdout.
    __repr__ = __str__
def execute(command, to_filename=None, cwd=None):
LOG.debug('Trying to execute command: %s', command)
commands = [c.strip() for c in re.split(ur'\|', command)]
env = os.environ
env['PATH'] = '/bin:/usr/bin:/sbin:/usr/sbin'
to_file = None
if to_filename:
@ -54,7 +65,7 @@ def execute(command, to_filename=None, cwd=None):
process.append(subprocess.Popen(
shlex.split(encoded_command),
env=env,
env=os.environ,
stdin=(process[-1].stdout if process else None),
stdout=(to_file
if (len(process) == len(commands) - 1) and to_file
@ -68,4 +79,5 @@ def execute(command, to_filename=None, cwd=None):
if len(process) >= 2:
process[-2].stdout.close()
stdout, stderr = process[-1].communicate()
return (process[-1].returncode, stdout, stderr)
out = ExecResult(stdout, stderr=stderr, return_code=process[-1].returncode)
return out

View File

@ -5,3 +5,4 @@ xmlbuilder>=1.0
PyYAML>=3.11
# pygit2>=0.20.3
venvgit2>=0.20.3.0
requests>=1.2.3

View File

@ -12,6 +12,7 @@ vm:
interfaces:
- mac: "52:54:a5:45:65:ae"
network: "net"
model_type: "virtio"
disks:
- size: "10240"
- size: "10240"
@ -23,11 +24,12 @@ ssh:
host: "10.250.2.20"
user: "root"
key_filename: "ssh/id_rsa"
command_timeout: 150
repo:
- name: "fuel_agent"
url: "https://github.com/stackforge/fuel-web.git"
branch: "master"
branch: "fuel_agent_bootloader_functional"
path: "fuel_agent"
artifact:

View File

@ -0,0 +1,10 @@
hacking>=0.8.0,<0.9
mock>=1.0
oslotest==1.0
testtools>=0.9.34
testrepository>=0.0.18
Jinja2
# those dependencies are to be able to run nosetests
nose==1.1.2
unittest2==0.5.1
nose2==0.4.1

View File

@ -8,8 +8,11 @@ usedevelop = True
install_command = pip install --allow-external -U {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
python setup.py testr --slowest --testr-args='{posargs:}'
nosetests -sv {posargs:fuel_agent_ci/tests}
;commands =
; python setup.py testr --slowest --testr-args='{posargs:}'
[tox:jenkins]
downloadcache = ~/cache/pip