Release 1.27.0

-----BEGIN PGP SIGNATURE-----
 
 iQEcBAABAgAGBQJYk8+9AAoJEBty/58O8cX8LdIH+wU/VrEVs0XYohiL6DUgabzs
 112U3UUihH5xMc/ca9Tarx+XwEvfMZkwYN2Qr0JoRJjmSt2AL6AezUhGSV+98vaY
 iQEccaFDFYlyDHm4V2r7N1xwS0B3mx87FPqVQQSUKlc3JsQxCy4o9RtD9aM8Gvqy
 +gAxMxL3p3O131K0Rvb0U5lC1FLgft9SuljCV8i5nU4/HdoryD6hedz2/ss8a9KG
 KKEdBKvPBKn73+nb8peQD/VXpej9C31r87q5VEjUsZkJ7gduY/qYLlGGgoBQqAXN
 WQ/ef1RkQKW5ba2jsjnk7fdOrA0+wYENxorR2WecuZbe2ieXw6fP3lYiD6VeWsM=
 =IUuh
 -----END PGP SIGNATURE-----

Merge tag '1.27.0' into merge-branch

Release 1.27.0

Change-Id: I9f6948636cae6d375d1d8315976504021f5a3bbb
Committed by Ian Wienand on 2017-02-03 11:49:45 +11:00
commit 3f8800832a
52 changed files with 546 additions and 253 deletions

.gitignore (3 changes)

@@ -1,3 +1,6 @@
+.coverage
+coverage.xml
+cover/*
 *~
 .testrepository
 *.sw?

View File

@@ -4,7 +4,7 @@ test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
     OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} \
     OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
     OS_DEBUG=${OS_DEBUG:-0} \
-    python -m subunit.run discover . $LISTOPT $IDOPTION
+    ${PYTHON:-python} -m subunit.run discover . $LISTOPT $IDOPTION
 test_id_option=--load-list $IDFILE
 test_list_option=--list

View File

@@ -36,11 +36,15 @@ parse_exclusions() {
     # ignore = sete setu
     section="dib-lint"
     option="ignore"
-    global_exclusions=$(python -c \
-        "import ConfigParser; \
-        conf=ConfigParser.ConfigParser(); \
-        conf.read('tox.ini'); \
-        print conf.get('$section', '$option') if conf.has_option('$section', '$option') else ''"
+    global_exclusions=$(python - <<EOF
+try:
+    import configparser
+except ImportError:
+    import ConfigParser as configparser
+conf=configparser.ConfigParser()
+conf.read('tox.ini')
+print(conf.get('$section', '$option')) if conf.has_option('$section', '$option') else ''
+EOF
     )
     echo $exclusions $global_exclusions
 }
@@ -206,7 +210,7 @@ done
 echo "Checking indents..."
-for i in $(find $ELEMENTS_DIR -type f -and -name '*.rst' -or -type f -executable); do
+for i in $(find bin $ELEMENTS_DIR -type f -and -name '*.rst' -or -type f -executable); do
     # Check for tab indentation
     if ! excluded tabindent; then
         if grep -q $'^ *\t' ${i}; then
@@ -235,7 +239,7 @@ for i in $(find $ELEMENTS_DIR -type f -name '*.yaml'); do
 import yaml
 import sys
 try:
-    objs = yaml.load(open('$i'))
+    objs = yaml.safe_load(open('$i'))
 except yaml.parser.ParserError:
     sys.exit(1)
 "

View File

@@ -1,30 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then
-    set -x
-fi
-set -eu
-set -o pipefail
-
-# Truncate /var/log files in preparation for first boot
-sudo find $TARGET_ROOT/var/log -type f -exec cp /dev/null '{}' \;
-
-# also /root logs
-sudo find $TARGET_ROOT/root -name \*.log -type f -delete

View File

@@ -127,6 +127,10 @@ function install_grub2 {
         GRUB_CFG=/boot/grub/grub.cfg
     fi
+    # Override the root device to the default label, and disable uuid
+    # lookup.
+    echo "GRUB_DEVICE=LABEL=${DIB_ROOT_LABEL}" >> /etc/default/grub
+    echo 'GRUB_DISABLE_LINUX_UUID=true' >> /etc/default/grub
     echo "GRUB_TIMEOUT=${DIB_GRUB_TIMEOUT:-5}" >>/etc/default/grub
     echo 'GRUB_TERMINAL="serial console"' >>/etc/default/grub
     echo 'GRUB_GFXPAYLOAD_LINUX=text' >>/etc/default/grub
@@ -175,17 +179,7 @@ function install_grub2 {
         sed -i "s%search --no.*%%" $GRUB_CFG
         sed -i "s%set root=.*%set root=(hd0,1)%" $GRUB_CFG
     fi
-    # force use of a LABEL:
-    # NOTE: Updating the grub config by hand once deployed should work, its just
-    # prepping it in a different environment that needs fiddling.
-    sed -i "s%$PART_DEV%LABEL=${DIB_ROOT_LABEL}%" $GRUB_CFG
-    sed -i "s%search --no-floppy --fs-uuid --set=root .*$%search --no-floppy --set=root --label ${DIB_ROOT_LABEL}%" $GRUB_CFG
-    sed -i "s%root=UUID=[A-Za-z0-9\-]*%root=LABEL=${DIB_ROOT_LABEL}%" $GRUB_CFG
-    if [ "$DISTRO_NAME" = 'fedora' ] ; then
-        if [ "$DIB_RELEASE" = '19' ]; then
-            sed -i "s%UUID=[A-Za-z0-9\-]*%LABEL=${DIB_ROOT_LABEL}%" /etc/fstab
-        fi
-    fi
     # Fix efi specific instructions in grub config file
     if [ -d /sys/firmware/efi ]; then
         sed -i 's%\(initrd\|linux\)efi /boot%\1 /boot%g' $GRUB_CFG

View File

@@ -6,13 +6,9 @@ Create a minimal image based on CentOS 7.
 Use of this element will require 'yum' and 'yum-utils' to be installed on
 Ubuntu and Debian. Nothing additional is needed on Fedora or CentOS.
 
-The `DIB_OFFLINE` or more specific `DIB_YUMCHROOT_USE_CACHE`
-variables can be set to prefer the use of a pre-cached root filesystem
-tarball.
-
-By default, `DIB_YUM_MINIMAL_CREATE_INTERFACES` is set to enable the
-creation of `/etc/sysconfig/network-scripts/ifcfg-eth[0|1]` scripts to
-enable DHCP on the `eth0` & `eth1` interfaces. If you do not have
+By default, ``DIB_YUM_MINIMAL_CREATE_INTERFACES`` is set to enable the
+creation of ``/etc/sysconfig/network-scripts/ifcfg-eth[0|1]`` scripts to
+enable DHCP on the ``eth0`` & ``eth1`` interfaces. If you do not have
 these interfaces, or if you are using something else to setup the
 network such as cloud-init, glean or network-manager, you would want
-to set this to `0`.
+to set this to ``0``.
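
As an aside (not part of this diff; the element list and image name are illustrative assumptions, only the variable itself comes from the README above), a build that relies on glean or cloud-init could disable the generated scripts roughly like::

    export DIB_YUM_MINIMAL_CREATE_INTERFACES=0
    disk-image-create -o centos7.qcow2 centos7 vm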

View File

@@ -3,3 +3,7 @@ export DIB_RELEASE=${DIB_RELEASE:-GenericCloud}
 # Useful for elements that work with fedora (dnf) & centos
 export YUM=${YUM:-yum}
+
+if [ -n "${DIB_CENTOS_DISTRIBUTION_MIRROR:-}" ]; then
+    export DIB_DISTRIBUTION_MIRROR=$DIB_CENTOS_DISTRIBUTION_MIRROR
+fi
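
As an illustration only (the mirror URL is a placeholder and the element list is an assumption), this hook lets a build point at a nearby mirror without affecting other distros::

    export DIB_CENTOS_DISTRIBUTION_MIRROR=http://mirror.example.com/centos
    disk-image-create -o centos7.qcow2 centos7 vm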

View File

@@ -6,10 +6,12 @@ fi
 set -eu
 set -o pipefail
 
+DIB_DISTRIBUTION_MIRROR=${DIB_DISTRIBUTION_MIRROR:-http://mirror.centos.org/centos}
+
 cat << EOF > /etc/yum.repos.d/centos6-latest.repo
 [rhel6]
 name=centos6
-baseurl=http://mirror.centos.org/centos/6/os/x86_64/
+baseurl=$DIB_DISTRIBUTION_MIRROR/6/os/x86_64/
 enabled=1
 metadata_expire=7d
 gpgcheck=0

View File

@@ -3,3 +3,7 @@ export DIB_RELEASE=GenericCloud
 # Useful for elements that work with fedora (dnf) & centos
 export YUM=${YUM:-yum}
+
+if [ -n "${DIB_CENTOS_DISTRIBUTION_MIRROR:-}" ]; then
+    export DIB_DISTRIBUTION_MIRROR=$DIB_CENTOS_DISTRIBUTION_MIRROR
+fi

View File

@@ -1,6 +1,11 @@
 export DISTRO_NAME=debian
 export DIB_RELEASE=${DIB_RELEASE:-stable}
 
+if [ -n "${DIB_DEBIAN_DISTRIBUTION_MIRROR:-}" ]; then
+    DIB_DISTRIBUTION_MIRROR=$DIB_DEBIAN_DISTRIBUTION_MIRROR
+fi
+
 export DIB_DISTRIBUTION_MIRROR=${DIB_DISTRIBUTION_MIRROR:-http://ftp.us.debian.org/debian}
 export DIB_DEBIAN_COMPONENTS=${DIB_DEBIAN_COMPONENTS:-main}
 export DIB_DEBIAN_COMPONENTS_WS=${DIB_DEBIAN_COMPONENTS//,/ }

View File

@@ -26,7 +26,10 @@ echo $DISTRO_NAME > /etc/hostname
 # cloud images expect eth0 and eth1 to use dhcp.
 mkdir -p /etc/network/interfaces.d
 
+if ! grep -E -q '^source(|-directory) /etc/network/interfaces.d/\*' /etc/network/interfaces; then
     echo "source /etc/network/interfaces.d/*" >> /etc/network/interfaces
+    echo 'Network configuration set to source /etc/network/interfaces.d/*'
+fi
 
 for interface in eth0 eth1; do
     cat << EOF | tee /etc/network/interfaces.d/$interface
 auto $interface

View File

@@ -17,3 +17,13 @@ configured properly before networking services are started.
 On Gentoo based distributions we will install the dhcpcd package and
 ensure the service starts at boot. This service automatically sets
 up all interfaces found via dhcp and/or dhcpv6 (or SLAAC).
+
+Environment Variables
+---------------------
+
+DIB_DHCP_TIMEOUT
+  :Required: No
+  :Default: 30
+  :Description: Amount of time in seconds that the systemd service will
+    wait to get an address.
+  :Example: DIB_DHCP_TIMEOUT=300
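
A hedged usage sketch (the element list and image name are assumptions; the timeout value mirrors the example above)::

    # Give slow DHCP servers up to five minutes before the unit fails.
    export DIB_DHCP_TIMEOUT=300
    disk-image-create -o ubuntu.qcow2 ubuntu-minimal dhcp-all-interfaces vm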

View File

@@ -24,6 +24,7 @@ if [ "$DIB_INIT_SYSTEM" == "upstart" ]; then
 elif [ "$DIB_INIT_SYSTEM" == "systemd" ]; then
     install -D -g root -o root -m 0644 ${SCRIPTDIR}/dhcp-interface@.service /usr/lib/systemd/system/dhcp-interface@.service
     install -D -g root -o root -m 0644 ${SCRIPTDIR}/dhcp-all-interfaces-udev.rules /etc/udev/rules.d/99-dhcp-all-interfaces.rules
+    sed -i "s/TimeoutStartSec=DIB_DHCP_TIMEOUT/TimeoutStartSec=${DIB_DHCP_TIMEOUT:-30}s/" /usr/lib/systemd/system/dhcp-interface@.service
 elif [ "$DIB_INIT_SYSTEM" == "sysv" ]; then
     install -D -g root -o root -m 0755 ${SCRIPTDIR}/dhcp-all-interfaces.init /etc/init.d/dhcp-all-interfaces
     update-rc.d dhcp-all-interfaces defaults

View File

@@ -38,7 +38,7 @@ function serialize_me() {
 }
 
 function get_if_link() {
-    cat /sys/class/net/${1}/carrier
+    cat /sys/class/net/${1}/carrier || echo 0
 }
 
 function enable_interface() {
@@ -87,11 +87,11 @@ function inspect_interface() {
     elif [ "$mac_addr_type" != "0" ]; then
         echo "Device has generated MAC, skipping."
     else
-        ip link set dev $interface up &>/dev/null
         local has_link
         local tries
         for ((tries = 0; tries < 20; tries++)); do
+            # Need to set the link up on each iteration
+            ip link set dev $interface up &>/dev/null
             has_link=$(get_if_link $interface)
             [ "$has_link" == "1" ] && break
             sleep 1

View File

@@ -1,17 +1,21 @@
 [Unit]
-Description=DHCP interface %I
-Before=network-pre.target
-Wants=network-pre.target
-ConditionPathExists=!/etc/sysconfig/network-scripts/ifcfg-%I
+Description=DHCP interface %i
+# We want to run after network.target so it doesn't try to bring
+# up the interfaces a second time, but network-online should not
+# be reached until after we've brought up the interfaces.
+After=network.target
+Before=network-online.target
+Wants=network-online.target
+ConditionPathExists=!/etc/sysconfig/network-scripts/ifcfg-%i
 
 [Service]
 Type=oneshot
 User=root
-ExecStartPre=/usr/local/sbin/dhcp-all-interfaces.sh %I
-ExecStart=/sbin/ifup %I
+ExecStartPre=/usr/local/sbin/dhcp-all-interfaces.sh %i
+ExecStart=/sbin/ifup %i
 RemainAfterExit=true
-TimeoutStartSec=30s
+TimeoutStartSec=DIB_DHCP_TIMEOUT
 
 [Install]
 WantedBy=multi-user.target

View File

@@ -1 +1,2 @@
 dhcp-client:
+ifupdown:

View File

@@ -8,9 +8,13 @@
     },
     "suse": {
       "dhcp-client": "dhcp-client"
+    },
+    "debian": {
+      "ifupdown": "ifupdown"
     }
   },
   "default": {
-    "dhcp-client": "isc-dhcp-client"
+    "dhcp-client": "isc-dhcp-client",
+    "ifupdown": ""
   }
 }

View File

@@ -11,10 +11,6 @@ Due to a bug in the released version of urlgrabber, on many systems an
 installation of urlgrabber from git is required. The git repository
 can be found here: http://yum.baseurl.org/gitweb?p=urlgrabber.git;a=summary
 
-The `DIB_OFFLINE` or more specific `DIB_YUMCHROOT_USE_CACHE`
-variables can be set to prefer the use of a pre-cached root filesystem
-tarball.
-
-This element sets the `DIB_RELEASE` var to 'fedora'. The release of fedora
-to be installed can be controlled through the `DIB_RELEASE` variable, which
-defaults to '21'.
+This element sets the ``DIB_RELEASE`` var to 'fedora'. The release of
+fedora to be installed can be controlled through the ``DIB_RELEASE``
+variable, which defaults to the latest supported release.

View File

@@ -1,2 +1,2 @@
 export DISTRO_NAME=fedora
-export DIB_RELEASE=${DIB_RELEASE:-24}
+export DIB_RELEASE=${DIB_RELEASE:-25}

View File

@@ -1,2 +1,5 @@
 export DISTRO_NAME=fedora
-export DIB_RELEASE=${DIB_RELEASE:-24}
+export DIB_RELEASE=${DIB_RELEASE:-25}
+if [ -n "${DIB_FEDORA_DISTRIBUTION_MIRROR:-}" ]; then
+    export DIB_DISTRIBUTION_MIRROR=$DIB_FEDORA_DISTRIBUTION_MIRROR
+fi

View File

@@ -33,15 +33,25 @@ function show_options {
 }
 
 function fix_shm {
+    # make /dev/shm dir if it doesn't exist
+    # mount tmpfs and chown it
+    # existing programs could be using /dev/shm
+    # This means it cannot be moved or backed
+    # up as a copy easily. The only remaining
+    # option is to move the link if it exists
+    # as a link. Existing programs will still
+    # hold the file handle of the original
+    # location open and new programs can use
+    # the fixed /dev/shm.
     if [[ "${RUN_ONCE_SHM}" == '1' ]]; then
-        if [[ -L /dev/shm.orig ]]; then
-            rm /dev/shm.orig
-        fi
+        if [[ ! -d /dev/shm ]]; then
+            if [[ ! -e /dev/shm ]]; then
+                if [[ -L /dev/shm ]]; then
+                    if [[ -d /dev/shm.orig ]]; then
+                        rm -Rf /dev/shm.orig
+                    fi
         mv /dev/shm /dev/shm.orig
+                fi
         mkdir /dev/shm
+            fi
+        fi
         mount -t tmpfs none /dev/shm
         chmod 1777 /dev/shm
         RUN_ONCE_SHM='0'
@@ -49,11 +59,17 @@ function fix_shm {
 }
 
 function unfix_shm {
+    # unmount tmpfs
+    # care about anything still using it
     if [[ "${RUN_ONCE_SHM}" == '0' ]]; then
         umount /dev/shm
+        if fuser /dev/shm; then
             rmdir /dev/shm
+        fi
+        if [[ -e /dev/shm.orig ]]; then
             mv /dev/shm.orig /dev/shm
         fi
+    fi
 }
function install_gentoo_packages { function install_gentoo_packages {

View File

@@ -18,6 +18,10 @@ Beyond installing the ironic-python-agent, this element does the following:
 * Install the certificate if any, which is set to the environment variable
   ``DIB_IPA_CERT`` for validating the authenticity by ironic-python-agent. The
   certificate can be self-signed certificate or CA certificate.
+* Compresses the initramfs with the command specified in the environment
+  variable ``DIB_IPA_COMPRESS_CMD``, which is 'gzip' by default. This command
+  should read raw data from stdin and write compressed data to stdout, and it
+  may include arguments.
 
 This element outputs three files:
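
Purely as a sketch (the compressor choice and element list are assumptions, not part of this change), any filter that reads stdin and writes stdout should work::

    # xz must be available where the ramdisk is assembled.
    export DIB_IPA_COMPRESS_CMD="xz -9"
    disk-image-create -o ironic-deploy ironic-agent fedora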

View File

@@ -18,6 +18,8 @@ source $_LIB/img-functions
 IMAGE_PATH=$(readlink -f $IMAGE_NAME)
 cd $TARGET_ROOT
 
+DIB_IPA_COMPRESS_CMD="${DIB_IPA_COMPRESS_CMD:-gzip}"
+
 echo "#disabled" > ./tmp/fstab.new
 sudo mv ./tmp/fstab.new ./etc/fstab
 sudo ln -s ./sbin/init ./
@@ -42,7 +44,7 @@ sudo find . -xdev \
     -path './var/cache/*' -prune -o \
     -name '*.pyc' -prune -o \
     -name '*.pyo' -prune -o \
-    -print | sudo cpio -o -H newc | gzip > ${IMAGE_PATH}.initramfs
+    -print | sudo cpio -o -H newc | ${DIB_IPA_COMPRESS_CMD} > ${IMAGE_PATH}.initramfs
 
 select_boot_kernel_initrd $TARGET_ROOT
 sudo cp $BOOTDIR/$KERNEL ${IMAGE_PATH}.kernel

View File

@@ -1,4 +1,4 @@
-# ironic-python-agent - Openstack Ironic Python Agnet
+# ironic-python-agent - OpenStack Ironic Python Agent
 #
 # The ironic-python-agent helps ironic in deploying instances.

View File

@@ -3,7 +3,7 @@ Description=Ironic Python Agent
 After=network-online.target
 
 [Service]
-ExecStartPre=/usr/sbin/modprobe vfat
+ExecStartPre=/sbin/modprobe vfat
 ExecStart=/usr/local/bin/ironic-python-agent
 Restart=always
 RestartSec=30s

View File

@@ -59,7 +59,7 @@ def collect_data(data, filename, element_name):
     try:
         objs = json.load(open(filename))
     except ValueError:
-        objs = yaml.load(open(filename))
+        objs = yaml.safe_load(open(filename))
     for pkg_name, params in objs.items():
         if not params:
             params = {}

View File

@@ -2,7 +2,10 @@
   "family": {
     "gentoo": {
       "python-pip": "dev-python/pip",
+      "python3-pip": "dev-python/pip",
       "python-virtualenv": "dev-python/virtualenv",
+      "python3-virtualenv": "dev-python/virtualenv",
+      "python-dev": "dev-lang/python",
       "python3-dev": "dev-lang/python"
     },
     "suse": {

View File

@@ -3,7 +3,7 @@ proliant-tools
 * This element can be used when building ironic-agent ramdisk. It
   enables ironic-agent ramdisk to do in-band cleaning operations specific
-  to HP ProLiant hardware.
+  to HPE ProLiant hardware.
 
 * Works with ubuntu and fedora distributions (on which ironic-agent
   element is supported).
@@ -11,20 +11,24 @@ proliant-tools
 * Currently the following utilities are installed:
 
   + `proliantutils`_ - This module registers an ironic-python-agent hardware
-    manager for HP ProLiant hardware, which implements in-band cleaning
+    manager for HPE ProLiant hardware, which implements in-band cleaning
     steps. The latest version of ``proliantutils`` available is
     installed. This python module is released with Apache license.
 
-  + `HP Smart Storage Administrator (HP SSA) CLI for Linux 64-bit`_ - This
+  + `HPE Smart Storage Administrator (HPE SSA) CLI for Linux 64-bit`_ - This
     utility is used by ``proliantutils`` library above for doing in-band RAID
-    configuration on HP ProLiant hardware. Currently installed version is
-    2.30. Newer version of ``hpssacli`` when available, may be installed to
-    the ramdisk by using the environment variable ``DIB_HPSSACLI_URL``.
-    ``DIB_HPSSACLI_URL`` should contain the HTTP(S) URL for downloading the
-    RPM package for ``hpssacli`` utility. Availability of newer versions can
-    be in the Revision History in the above link. This utility is closed source
-    and is released with `HP End User License Agreement Enterprise Version`_.
+    configuration on HPE ProLiant hardware. Currently installed version is
+    2.60. A newer version of ``ssacli``, when available, may be installed to
+    the ramdisk by using the environment variable ``DIB_SSACLI_URL``.
+    ``DIB_SSACLI_URL`` should contain the HTTP(S) URL for downloading the
+    RPM package for the ``ssacli`` utility. The old environment variable
+    ``DIB_HPSSACLI_URL``, an HTTP(S) URL for downloading the RPM package for
+    the ``hpssacli`` utility, is deprecated; ``hpssacli`` is no longer
+    supported, so use ``ssacli`` instead for the same functionality.
+    Availability of newer versions can be found in the Revision History
+    in the above link. This utility is closed source and is released with
+    `HPE End User License Agreement Enterprise Version`_.
 
 .. _`proliantutils`: https://pypi.python.org/pypi/proliantutils
-.. _`HP Smart Storage Administrator (HP SSA) CLI for Linux 64-bit`: http://h20564.www2.hpe.com/hpsc/swd/public/detail?swItemId=MTX_b6a6acb9762443b182280db805
-.. _`HP End User License Agreement Enterprise Version`: ftp://ftp.hp.com/pub/softlib2/software1/doc/p2057331991/v33194/hpeula-en.html
+.. _`HPE Smart Storage Administrator (HPE SSA) CLI for Linux 64-bit`: http://h20564.www2.hpe.com/hpsc/swd/public/detail?swItemId=MTX_3d16386b418a443388c18da82f&swEnvOid=4181
+.. _`HPE End User License Agreement Enterprise Version`: https://downloads.hpe.com/pub/softlib2/software1/doc/p1796552785/v113125/eula-en.html
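
For illustration only (the URL is a placeholder, not a real download location, and the element list is an assumption), a newer package could be injected at build time::

    export DIB_SSACLI_URL=https://example.com/pkgs/ssacli-x.y-z.x86_64.rpm
    disk-image-create -o proliant-deploy ironic-agent proliant-tools fedora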

View File

@@ -21,18 +21,22 @@ fi
 set -eu
 set -o pipefail
 
-# Set the below variable to allow hpssacli to be installed from custom URLs.
-DIB_HPSSACLI_URL=${DIB_HPSSACLI_URL:-https://ftp.hp.com/pub/softlib2/software1/pubsw-linux/p1857046646/v109216/hpssacli-2.30-6.0.x86_64.rpm}
-curl -k -o /tmp/hpssacli.rpm $DIB_HPSSACLI_URL
-if [[ $DISTRO_NAME = "ubuntu" || $DISTRO_NAME = "debian" ]]; then
-    # There is no deb package for hpssacli. Install with alien.
-    alien -i /tmp/hpssacli.rpm
-else
-    rpm -iv /tmp/hpssacli.rpm
+# Set the below variable to allow ssacli to be installed from custom URLs.
+if [[ -n "${DIB_HPSSACLI_URL:=}" ]]; then
+    echo "The environment variable DIB_HPSSACLI_URL is deprecated; use DIB_SSACLI_URL instead."
 fi
-rm -f /tmp/hpssacli.rpm
+DIB_SSACLI_URL=${DIB_SSACLI_URL:-${DIB_HPSSACLI_URL:-https://downloads.hpe.com/pub/softlib2/software1/pubsw-linux/p1857046646/v123474/ssacli-2.60-19.0.x86_64.rpm}}
+curl -k -o /tmp/ssacli.rpm $DIB_SSACLI_URL
+if [[ $DISTRO_NAME = "ubuntu" || $DISTRO_NAME = "debian" ]]; then
+    # There is no deb package for ssacli. Install with alien.
+    alien -i /tmp/ssacli.rpm
+else
+    rpm -iv /tmp/ssacli.rpm
+fi
+rm -f /tmp/ssacli.rpm
 
 # Install proliantutils python module in the
 # virtual environment of ironic-python-agent.

View File

@@ -45,7 +45,7 @@ def main():
     if use_pypi_python_org:
         indices.append('https://pypi.python.org/simple')
     retries = os.environ.get('DIB_PIP_RETRIES')
-    with file(home + '/.pip/pip.conf', 'wt') as output:
+    with open(home + '/.pip/pip.conf', 'wt') as output:
         output.write('[global]\n')
         output.write('log = %s/pip.log\n' % (home,))
         output.write('index-url = %s\n' % (indices[0],))
@@ -53,7 +53,7 @@ def main():
             output.write('retries = %s\n' % retries)
         for index in indices[1:]:
             output.write('extra-index-url = %s\n' % (index,))
-    with file(home + '/.pydistutils.cfg', 'wt') as output:
+    with open(home + '/.pydistutils.cfg', 'wt') as output:
         output.write('[easy_install]\n')
         output.write('index_url = %s\n' % (easy_index,))

View File

@@ -24,7 +24,7 @@ def load_service_mapping(filepath="/usr/share/svc-map/services"):
     if not os.path.isfile(filepath):
         return {}
     with open(filepath, 'r') as data_file:
-        return yaml.load(data_file.read())
+        return yaml.safe_load(data_file.read())
 
 
 def main():

View File

@@ -66,7 +66,7 @@ def main():
         data_path = os.path.join(path, "svc-map")
         if os.path.exists(data_path):
             with open(data_path, 'r') as dataFile:
-                data = yaml.load(dataFile.read())
+                data = yaml.safe_load(dataFile.read())
             try:
                 service_names = merge_data(
                     data,

View File

@@ -6,7 +6,7 @@ Note: The ubuntu element is likely what you want unless you really know
 you want this one for some reason. The ubuntu element gets a lot more testing
 coverage and use.
 
-Create a minimal image based on Ubuntu. We default to trusty but DIB_RELEASE
+Create a minimal image based on Ubuntu. We default to xenial but DIB_RELEASE
 is mapped to any series of Ubuntu.
 
 If necessary, a custom apt keyring and debootstrap script can be
View File

@ -1,4 +1,8 @@
export DISTRO_NAME=ubuntu export DISTRO_NAME=ubuntu
export DIB_RELEASE=${DIB_RELEASE:-xenial} export DIB_RELEASE=${DIB_RELEASE:-xenial}
export DIB_DEBIAN_COMPONENTS=${DIB_DEBIAN_COMPONENTS:-main,restricted,universe} export DIB_DEBIAN_COMPONENTS=${DIB_DEBIAN_COMPONENTS:-main,restricted,universe}
if [ -n "${DIB_UBUNTU_DISTRIBUTION_MIRROR:-}" ]; then
DIB_DISTRIBUTION_MIRROR=$DIB_UBUNTU_DISTRIBUTION_MIRROR
fi
export DIB_DISTRIBUTION_MIRROR=${DIB_DISTRIBUTION_MIRROR:-http://archive.ubuntu.com/ubuntu} export DIB_DISTRIBUTION_MIRROR=${DIB_DISTRIBUTION_MIRROR:-http://archive.ubuntu.com/ubuntu}

View File

@@ -21,14 +21,12 @@ fi
 set -eu
 set -o pipefail
 
-DIB_DISTRIBUTION_MIRROR=${DIB_DISTRIBUTION_MIRROR:-http://archive.ubuntu.com/ubuntu}
-
 # We should manage this in a betterer way
 cat << EOF >/etc/apt/sources.list
-deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE main restricted universe
-deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-updates main restricted universe
-deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-backports main restricted universe
-deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-security main restricted universe
+deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE ${DIB_DEBIAN_COMPONENTS//,/ }
+deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-updates ${DIB_DEBIAN_COMPONENTS//,/ }
+deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-backports ${DIB_DEBIAN_COMPONENTS//,/ }
+deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-security ${DIB_DEBIAN_COMPONENTS//,/ }
 EOF
 
 # Need to update to retrieve the signed Release file

View File

@@ -21,14 +21,12 @@ fi
 set -eu
 set -o pipefail
 
-DIB_DISTRIBUTION_MIRROR=${DIB_DISTRIBUTION_MIRROR:-http://archive.ubuntu.com/ubuntu}
-
 # We should manage this in a betterer way
 sudo bash -c "cat << EOF >$TARGET_ROOT/etc/apt/sources.list
-deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE main restricted universe
-deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-updates main restricted universe
-deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-backports main restricted universe
-deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-security main restricted universe
+deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE ${DIB_DEBIAN_COMPONENTS//,/ }
+deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-updates ${DIB_DEBIAN_COMPONENTS//,/ }
+deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-backports ${DIB_DEBIAN_COMPONENTS//,/ }
+deb $DIB_DISTRIBUTION_MIRROR $DIB_RELEASE-security ${DIB_DEBIAN_COMPONENTS//,/ }
 EOF"
 
 sudo mount -t proc none $TARGET_ROOT/proc

View File

@@ -9,10 +9,6 @@ or fedora-minimal elements to get an actual base image.
 Use of this element will require 'yum' and 'yum-utils' to be installed on
 Ubuntu and Debian. Nothing additional is needed on Fedora or CentOS.
 
-The `DIB_OFFLINE` or more specific `DIB_YUMCHROOT_USE_CACHE`
-variables can be set to prefer the use of a pre-cached root filesystem
-tarball.
-
 If you wish to have DHCP networking setup for eth0 & eth1 via
 /etc/sysconfig/network-config scripts/ifcfg-eth[0|1], set the
 environment variable `DIB_YUM_MINIMAL_CREATE_INTERFACES` to `1`.

View File

@@ -31,8 +31,6 @@ if [ $ARCH = amd64 ]; then
     ARCH=x86_64
 fi
 
 # Calling elements will need to set DISTRO_NAME and DIB_RELEASE
-DIB_YUMCHROOT_EXTRA_ARGS=${DIB_YUMCHROOT_EXTRA_ARGS:-}
-YUMCHROOT_TARBALL=$DIB_IMAGE_CACHE/yumchroot-${DISTRO_NAME}-${DIB_RELEASE}-${ARCH}.tar.gz
 # TODO Maybe deal with DIB_DISTRIBUTION_MIRROR
 http_proxy=${http_proxy:-}
 YUM=${YUM:-yum}
@@ -189,10 +187,6 @@ function _install_pkg_manager {
         $TARGET_ROOT/etc/yum.repos.d/*repo
 }
 
-if [ -n "$DIB_OFFLINE" -o -n "${DIB_YUMCHROOT_USE_CACHE:-}" ] && [ -f $YUMCHROOT_TARBALL ] ; then
-    echo $YUMCHROOT_TARBALL found in cache. Using.
-    sudo tar -C $TARGET_ROOT --numeric-owner -xzf $YUMCHROOT_TARBALL
-else
 # Note this is not usually done for root.d elements (see
 # lib/common-functions:mount_proc_dev_sys) but it's important that
 # we have things like /dev/urandom around inside the chroot for
@@ -286,10 +280,4 @@ else
         sudo mv $newfile $(echo $newfile | sed 's/.rpmnew$//')
     done
 
-    echo Caching result in $YUMCHROOT_TARBALL
-    sudo tar --numeric-owner \
-        -C $TARGET_ROOT \
-        -zcf $YUMCHROOT_TARBALL --exclude='./tmp/*' .
-fi
 
 sudo rm -f ${TARGET_ROOT}/.extra_settings

View File

@@ -143,24 +143,44 @@ function eval_run_d () {
     trap - ERR
 }
 
+# Get any process that appears to be running in $TMP_BUILD_DIR
+function _get_chroot_processes () {
+    # Deselect kernel threads, and use a python script to avoid
+    # forking lots and lots of readlink / grep processes on a busy
+    # system.
+    ps --ppid 2 -p 2 --deselect -o pid= | xargs python -c '
+import os
+import sys
+
+for pid in sys.argv[2:]:
+    try:
+        root = os.readlink("/proc/%s/root" % pid)
+    except:
+        continue
+    if sys.argv[1] in root:
+        print("%s" % pid)
+' $TMP_BUILD_DIR
+}
+
 function kill_chroot_processes () {
     local xtrace
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
 
+    local pidname
+
     if [ -z "${1}" ]; then
         echo "ERROR: no chroot directory specified"
         exit 1
     fi
 
-    for piddir in /proc/[0-9]*; do
-        pid=${piddir##/proc/}
-        pidname=$(cat $piddir/comm 2>/dev/null || echo "unknown")
+    for pid in $(_get_chroot_processes); do
         # If there are open files from the chroot, just kill the process using
-        # these files.
-        if sudo readlink -f $piddir/root | grep -q $TMP_BUILD_DIR; then
+        # these files. This is racy, but good enough
+        pidname=$(cat $piddir/comm 2>/dev/null || echo "unknown")
         echo "Killing chroot process: '${pidname}($pid)'"
         sudo kill $pid
-        fi
     done
 
     $xtrace

View File

@@ -117,6 +117,10 @@ function finalise_base () {
         unmount_dir $TMP_MOUNT_PATH/tmp
     fi
     find $TMP_MOUNT_PATH/tmp -maxdepth 1 -mindepth 1 | xargs sudo rm -rf --one-file-system
+    # Truncate /var/log files in preparation for first boot
+    sudo find ${TMP_MOUNT_PATH}/var/log -type f -exec cp /dev/null '{}' \;
+    # also /root logs
+    sudo find ${TMP_MOUNT_PATH}/root -name \*.log -type f -delete
 }
 
 function compress_and_save_image () {

View File

@@ -22,7 +22,7 @@ Components
 To generate kernel+ramdisk pair for use with ironic, use::
 
-    ramdisk-image-create -o deploy.ramdisk deploy-ironic
+    ramdisk-image-create -o deploy.ramdisk ironic-agent
 
 `element-info`

View File

@@ -0,0 +1,225 @@
..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
========================================
Block Device Setup Level 1: Partitioning
========================================
During the creation of a disk image (e.g. for a VM), there is the need
to create, setup, configure and afterwards detach some kind of storage
where the newly installed OS can be copied to or directly installed
in.
Remark
------
The implementation for this proposed changed already exists, was
discussed and is currently waiting for reviews [1]. To have a
complete overview over the block device setup, this document is
provided.
The dependencies are not implemented as they should be, because
* the spec process is currently in the phase of discussion and not
finalized [2],
* the implementation was finished and reviewed before the spec process
was described. [1]
Problem description
===================
When setting up a block device there is the need to partition the
block device.
Use Cases
---------
User (Actor: End User) wants to create multiple partitions in multiple
block devices where the new system is installed in.
The user wants to specify if the image should be optimized for speed
or for size.
The user wants the same behavior independently of the current host or
target OS.
Proposed change
===============
Move the partitioning functionality from
`elements/vm/block-device.d/10-partition` to a new block_device
python module: `level1/partitioning.py`.
Instead of using a program or a library, the data is written directly
with the help of python `file.write()` into the disk image.
Alternatives
------------
The existing implementation uses the `parted` program (old versions of
DIB were using `sfdisk`). The first implementations of this change
used the python-parted library.
All these approaches have a major drawback: they automatically
*optimize* based on information collected on the host system - and not
on the target system. Therefore the resulting partitioning layout may
lead to a degradation of performance on the target system. A change
in these external programs and libraries can also lead to errors during a
DIB run [4], and there are general issues [7].
Also, everything built around GNU parted falls under the GPL2 (not
LGPL2) license - which is incompatible with the currently used Apache
license in diskimage-builder.
API impact
----------
Extends the (optional) environment variable
``DIB_BLOCK_DEVICE_CONFIG``: a JSON structure to configure the
(complete) block device setup. For this proposal the second entry in
the original list will be used (the first part (as described in [5])
is used by the level 0 modules).
The name of this module is `partitioning` (element[0]). The value
(element[1]) is a dictionary.
For each disk that should be partitioned there exists one entry in the
dictionary. The key is the name of the disk (see [5] how to specify
names for block device level 0). The value is a dictionary that
defines the partitioning of each disk.
There are the following key / value pairs to define one disk:
label
(mandatory) Possible values: 'mbr'
This uses the Master Boot Record (MBR) layout for the disk.
(Later on this can be extended, e.g. using GPT).
align
(optional - default value '1MiB')
Set the alignment of the partition. This must be a multiple of the
block size (i.e. 512 bytes). The default of 1MiB (~ 2048 * 512
bytes blocks) is the default for modern systems and known to
perform well on a wide range of targets [6]. For each partition
there might be some space that is not used - which is `align` - 512
bytes. For the default of 1MiB exactly 1048064 bytes (= 1 MiB -
512 byte) are not used in the partition itself. Please note that
if a boot loader should be written to the disk or partition,
there is a need for some space. E.g. grub needs 63 * 512 byte
blocks between the MBR and the start of the partition data; this
means when grub will be installed, the `align` must be set at least
to 64 * 512 byte = 32 KiB.
partitions
(mandatory) A list of dictionaries. Each dictionary describes one
partition.
The following key / value pairs can be given for each partition:
name
(mandatory) The name of the partition. With the help of this name,
the partition can later be referenced, e.g. while creating a
file system.
flags
(optional) List of flags for the partition. Default: empty.
Possible values:
boot
Sets the boot flag for the partition
size
(mandatory) The size of the partition. The size can either be an
absolute number using units like `10GiB` or `1.75TB` or relative
(percentage) numbers: in the latter case the size is calculated
based on the remaining free space.
Example:

::

    ["partitioning",
     {"rootdisk": {
         "label": "mbr",
         "partitions":
             [{"name": "part-01",
               "flags": ["boot"],
               "size": "100%"}]}}]
Security impact
---------------
None - functionality stays the same.
Other end user impact
---------------------
None.
Performance Impact
------------------
Measurements showed there is a performance degradation for the target
system if the partition table is not correctly aligned: writing takes
about three times longer on an incorrectly aligned system vs. one that
is correctly aligned.
Implementation
==============
Assignee(s)
-----------
Primary assignee:
ansreas (andreas@florath.net)
Work Items
----------
None - this is already a small part of a bigger change [1].
Dependencies
============
None.
Testing
=======
The refactoring introduces no new test cases: the functionality is
tested during each existing test building VM images.
Documentation Impact
====================
End user: the additional environment variable is described.
References
==========
[1] Refactor: block-device handling (partitioning)
https://review.openstack.org/322671
[2] Add specs dir
https://review.openstack.org/336109
[3] Old implementation using parted-lib
https://review.openstack.org/#/c/322671/1..7/elements/block-device/pylib/block-device/level1/Partitioning.py
[4] ERROR: embedding is not possible, but this is required
for cross-disk install
http://lists.openstack.org/pipermail/openstack-dev/2016-June/097789.html
[5] Refactor: block-device handling (local loop)
https://review.openstack.org/319591
[6] Proper alignment of partitions on an Advanced Format HDD using Parted
http://askubuntu.com/questions/201164/proper-alignment-of-partitions-on-an-advanced-format-hdd-using-parted
[7] Red Hat Enterprise Linux 6 - Creating a 7TB Partition Using
parted Always Shows "The resulting partition is not properly
aligned for best performance"
http://h20564.www2.hpe.com/hpsc/doc/public/display?docId=emr_na-c03479326&DocLang=en&docLocale=en_US&jumpid=reg_r11944_uken_c-001_title_r0001
[8] Spec for changing the block device handling
https://review.openstack.org/336946

View File

@@ -0,0 +1 @@
fedora-minimal

View File

@@ -0,0 +1 @@
export DIB_INSTALLTYPE_pip_and_virtualenv=source

View File

@@ -0,0 +1 @@
ubuntu-minimal

View File

@@ -0,0 +1 @@
export DIB_INSTALLTYPE_pip_and_virtualenv=source

View File

@@ -0,0 +1,8 @@
---
features:
  - Log cleanup was previously split, with some done in
    img-functions.finalise_base and some in the base element. This
    version unifies the log tidy-up in lib/img-functions. This matters
    especially when building docker container images, where the base
    element cannot be used; the change removes a few hundred KB of
    useless logs in cases where the base element is not used.

View File

@@ -0,0 +1,9 @@
---
deprecations:
  - The ``DIB_YUMCHROOT_USE_CACHE`` variable has been removed and the
    Fedora and CentOS ``-minimal`` initial chroot will always be
    created by the package manager. The default creation of a chroot
    tarball is stopped for these elements. This unused option was
    unsafe; there is no guarantee that the base system will not change
    even between runs. Getting the package manager to reuse the cache
    for the initial chroot install is future work.

View File

@@ -12,3 +12,5 @@ oslosphinx>=4.7.0 # Apache-2.0
 
 # releasenotes
 reno>=1.8.0 # Apache-2.0
+
+coverage>=4.0 # Apache-2.0

tox.ini (10 changes)

@@ -9,8 +9,7 @@ install_command = pip install -U {opts} {packages}
 deps= -r{toxinidir}/requirements.txt
     -r{toxinidir}/test-requirements.txt
 commands=
-    python setup.py testr --slowest --testr-args='{posargs}'
-passenv = ELEMENTS_PATH DIB_RELEASE DIB_DEBUG_TRACE DIB_DEV_USER_USERNAME DIB_DEV_USER_PWDLESS_SUDO DIB_DEV_USER_PASSWORD USER HOME http_proxy https_proxy
+    python setup.py test --slowest --testr-args='{posargs}'
 
 [testenv:pep8]
 commands =
@@ -25,9 +24,10 @@ envdir = {toxworkdir}/venv
 commands = {toxinidir}/tests/run_functests.sh {posargs}
 
 [testenv:cover]
-setenv = PYTHON=coverage run --source diskimage_builder
-commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'
-           bash -c 'testr run --parallel ; RET=$? ; coverage combine ; coverage html -d ./cover $OMIT && exit $RET'
+# NOTE: this is "setup.py test" (*not* testr) which is a pbr wrapper
+# around testr. This understands --coverage-package-name which we
+# need due to underscore issues.
+commands = python setup.py test --coverage --coverage-package-name diskimage_builder --testr-args='{posargs}'
 
 [testenv:docs]
 commands = python setup.py build_sphinx