Use testr to run nova unittests.

Convert nova from using nosetests to testr for its test runner. Some
tests had to be modified to get them to run properly under testr.

run_tests.sh has been updated to run testr instead of nosetests.

Coverage is collected by running subunit.run under coverage.py when the
coverage environment is selected.
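For reference, a minimal sketch of what that amounts to, written against
the coverage.py API rather than the command line (the real run simply
points the PYTHON variable consumed by .testr.conf at
"coverage run --source nova --parallel-mode"); the names below are
illustrative only:

    import coverage

    # Each test worker records its own data file (parallel mode):
    cov = coverage.coverage(source=['nova'], data_suffix=True)
    cov.start()
    # ... the worker runs its share of the test suite here ...
    cov.stop()
    cov.save()  # writes a separate .coverage.* file per worker

    # Afterwards the per-worker files are merged and reported:
    combined = coverage.coverage()
    combined.combine()
    combined.html_report(include=['nova/*'],
                         omit=['nova/openstack/common/*'],
                         directory='covhtml')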

Note that you will need to rebuild your virtualenvs, as nose is being
removed from the dependency lists and replaced by testr. Tests will run
in separate processes once this change is merged, so you cannot use
test classes to pass information between tests; each test should be a
proper, independent unit. Additionally, the -x and -d flags to
run_tests.sh have been removed, as there are currently no good
equivalents for that functionality under testr.
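As a rough illustration of what an independent unit means here (the
class and data below are made up, not from the nova tree): every test
builds its own state in setUp or in the test body, because with testr
the tests of a class may be split across worker processes and run in
any order:

    import testtools


    class WidgetTestCase(testtools.TestCase):
        def setUp(self):
            super(WidgetTestCase, self).setUp()
            # Build everything this test needs; nothing can be inherited
            # from whatever test happened to run earlier, which may have
            # run in a different process entirely.
            self.widget = {'name': 'example', 'size': 0}

        def test_resize(self):
            self.widget['size'] = 10
            self.assertEqual(10, self.widget['size'])

        def test_default_size(self):
            # Would break if it relied on test_resize having run first.
            self.assertEqual(0, self.widget['size'])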

Change-Id: I019ca098972ca749b195f59968cf21edd5ba9109
Clark Boylan 2012-10-30 16:30:02 -07:00
parent 8652d71ade
commit 4abc8cc64f
17 changed files with 106 additions and 104 deletions

.testr.conf (new file)

@@ -0,0 +1,4 @@
[DEFAULT]
test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./nova/tests $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
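The test_command above uses standard unittest discovery; a rough
equivalent of the "-t ./ ./nova/tests" part, expressed with the stdlib
loader (subunit.run wraps the same machinery but reports results as a
subunit stream that testr can consume):

    import unittest

    # -t ./ is the top-level (import root) directory; ./nova/tests is
    # where discovery starts looking for test modules.
    loader = unittest.TestLoader()
    suite = loader.discover(start_dir='./nova/tests', top_level_dir='./')
    print('discovered %d tests' % suite.countTestCases())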


@@ -191,6 +191,15 @@ class TestCase(testtools.TestCase):
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
if (os.environ.get('OS_STDOUT_NOCAPTURE') != 'True' and
os.environ.get('OS_STDOUT_NOCAPTURE') != '1'):
stdout = self.useFixture(fixtures.DetailStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_NOCAPTURE') != 'True' and
os.environ.get('OS_STDERR_NOCAPTURE') != '1'):
stderr = self.useFixture(fixtures.DetailStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.log_fixture = self.useFixture(fixtures.FakeLogger('nova'))
self.useFixture(conf_fixture.ConfFixture(CONF))


@@ -25,6 +25,8 @@ import os
import string
import tempfile
import fixtures
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
@@ -101,6 +103,7 @@ class CloudTestCase(test.TestCase):
super(CloudTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volume_api_class='nova.tests.fake_volume.API')
self.useFixture(fixtures.FakeLogger('boto'))
def fake_show(meh, context, id):
return {'id': id,


@@ -36,7 +36,7 @@ from nova import test
FAKE_IMAGE_REF = 'fake-image-ref'
class _BaseTestCase(test.TestCase):
class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
@@ -223,7 +223,7 @@ class _BaseTestCase(test.TestCase):
self.assertEqual(port, backdoor_port)
class ConductorTestCase(_BaseTestCase):
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests"""
def setUp(self):
super(ConductorTestCase, self).setUp()
@@ -231,7 +231,7 @@ class ConductorTestCase(_BaseTestCase):
self.stub_out_client_exceptions()
class ConductorRPCAPITestCase(_BaseTestCase):
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests"""
def setUp(self):
super(ConductorRPCAPITestCase, self).setUp()
@@ -240,7 +240,7 @@ class ConductorRPCAPITestCase(_BaseTestCase):
self.conductor = conductor_rpcapi.ConductorAPI()
class ConductorAPITestCase(_BaseTestCase):
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests"""
def setUp(self):
super(ConductorAPITestCase, self).setUp()
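The switch of _BaseTestCase from test.TestCase to object follows the
usual mixin pattern for shared test logic under discovery-based
runners: if the base were itself a TestCase, discovery would collect
and run its tests directly, without the setUp of a concrete subclass.
A standalone sketch of the pattern with made-up classes (not nova
code):

    import testtools


    class _FakeAPI(object):
        def __init__(self, name):
            self.name = name

        def ping(self):
            return 'pong'


    class _SharedPingTests(object):
        # Plain object: discovery never collects this class on its own.
        def test_ping(self):
            self.assertEqual('pong', self.api.ping())


    class LocalAPITestCase(_SharedPingTests, testtools.TestCase):
        def setUp(self):
            super(LocalAPITestCase, self).setUp()
            self.api = _FakeAPI('local')


    class RemoteAPITestCase(_SharedPingTests, testtools.TestCase):
        def setUp(self):
            super(RemoteAPITestCase, self).setUp()
            self.api = _FakeAPI('remote')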


@@ -43,9 +43,16 @@ class BaseTestCase(test.TestCase):
def tearDown(self):
super(BaseTestCase, self).tearDown()
has_errors = len([test for (test, msgs) in self._currentResult.errors
# python-subunit will wrap test results with a decorator.
# Need to access the decorated member of results to get the
# actual test result when using python-subunit.
if hasattr(self._currentResult, 'decorated'):
result = self._currentResult.decorated
else:
result = self._currentResult
has_errors = len([test for (test, msgs) in result.errors
if test.id() == self.id()]) > 0
failed = len([test for (test, msgs) in self._currentResult.failures
failed = len([test for (test, msgs) in result.failures
if test.id() == self.id()]) > 0
if not has_errors and not failed:
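A hedged generalization of the unwrap above (a hypothetical helper, not
part of this change): subunit and testtools result decorators expose
the wrapped object as .decorated, so walking down the chain also copes
with nested decoration:

    def _real_result(result):
        # python-subunit wraps the runner's result object in a decorator;
        # step through any chain of decorators until we reach the object
        # that actually carries .errors and .failures.
        while hasattr(result, 'decorated'):
            result = result.decorated
        return result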


@@ -21,6 +21,8 @@ import mox
import os
import tempfile
import fixtures
from nova import context
import nova.db.api
from nova import exception
@@ -83,6 +85,7 @@ class TestS3ImageService(test.TestCase):
def setUp(self):
super(TestS3ImageService, self).setUp()
self.context = context.RequestContext(None, None)
self.useFixture(fixtures.FakeLogger('boto'))
# set up one fixture to test shows, should have id '1'
nova.db.api.s3_image_create(self.context,


@@ -22,6 +22,8 @@ import uuid
from lxml import etree
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.api.openstack.compute import extensions
from nova.cloudpipe.pipelib import CloudPipe
from nova.compute import api
from nova import context


@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.api.openstack.compute import extensions
from nova.openstack.common import cfg
from nova.openstack.common.log import logging
from nova.tests.integrated import integrated_helpers


@@ -29,6 +29,7 @@ try:
from boto.connection import HTTPResponse
except ImportError:
from httplib import HTTPResponse
import fixtures
import webob
from nova.api import auth
@@ -221,6 +222,7 @@ class ApiEc2TestCase(test.TestCase):
self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
), 'nova.api.ec2.cloud.CloudController'))))
self.useFixture(fixtures.FakeLogger('boto'))
def expect_http(self, host=None, is_secure=False, api_version=None):
"""Returns a new EC2 connection"""


@@ -27,7 +27,7 @@ from nova.virt.libvirt import imagebackend
CONF = cfg.CONF
class _ImageTestCase(test.TestCase):
class _ImageTestCase(object):
INSTANCES_PATH = '/fake'
def mock_create_image(self, image):
@@ -111,7 +111,7 @@ class _ImageTestCase(test.TestCase):
self.mox.VerifyAll()
class RawTestCase(_ImageTestCase):
class RawTestCase(_ImageTestCase, test.TestCase):
SIZE = 1024
@@ -161,7 +161,7 @@ class RawTestCase(_ImageTestCase):
self.mox.VerifyAll()
class Qcow2TestCase(_ImageTestCase):
class Qcow2TestCase(_ImageTestCase, test.TestCase):
SIZE = 1024 * 1024 * 1024
def setUp(self):
@@ -224,7 +224,7 @@ class Qcow2TestCase(_ImageTestCase):
self.mox.VerifyAll()
class LvmTestCase(_ImageTestCase):
class LvmTestCase(_ImageTestCase, test.TestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024


@@ -57,7 +57,7 @@ def catch_notimplementederror(f):
return wrapped_func
class _FakeDriverBackendTestCase(test.TestCase):
class _FakeDriverBackendTestCase(object):
def _setup_fakelibvirt(self):
# So that the _supports_direct_io does the test based
# on the current working directory, instead of the
@@ -142,7 +142,7 @@ class _FakeDriverBackendTestCase(test.TestCase):
super(_FakeDriverBackendTestCase, self).tearDown()
class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase):
class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
"""Test that ComputeManager can successfully load both
old style and new style drivers and end up with the correct
final class"""
@@ -532,19 +532,19 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
class AbstractDriverTestCase(_VirtDriverTestCase):
class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = "nova.virt.driver.ComputeDriver"
super(AbstractDriverTestCase, self).setUp()
class FakeConnectionTestCase(_VirtDriverTestCase):
class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = 'nova.virt.fake.FakeDriver'
super(FakeConnectionTestCase, self).setUp()
class LibvirtConnTestCase(_VirtDriverTestCase):
class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
# Point _VirtDriverTestCase at the right module
self.driver_module = 'nova.virt.libvirt.LibvirtDriver'


@@ -339,8 +339,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
fake_image.FakeImageService_reset()
super(XenAPIVMTestCase, self).tearDown()
def test_init_host(self):
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',


@@ -11,14 +11,11 @@ function usage {
echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
echo " -n, --no-recreate-db Don't recreate the test database."
echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
echo " -P, --no-pep8 Don't run static code checks"
echo " -c, --coverage Generate coverage report"
echo " -h, --help Print this usage message"
echo " -v, --verbose Display nosetests in the console"
echo " -d, --debug Enable pdb's prompt to be displayed during tests. This will run nosetests with --pdb option"
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
@@ -39,10 +36,8 @@ function process_option {
-p|--pep8) just_pep8=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
-d|--debug) debug=1;;
-v|--verbose) verbose=1;;
-*) noseopts="$noseopts $1";;
*) noseargs="$noseargs $1"
-*) testropts="$testropts $1";;
*) testrargs="$testrargs $1"
esac
}
@@ -53,81 +48,61 @@ never_venv=0
force=0
no_site_packages=0
installvenvopts=
noseargs=
noseopts=
testrargs=
testropts=
wrapper=""
just_pep8=0
no_pep8=0
coverage=0
recreate_db=1
verbose=0
debug=0
export NOSE_WITH_OPENSTACK=1
export NOSE_OPENSTACK_COLOR=1
export NOSE_OPENSTACK_RED=0.05
export NOSE_OPENSTACK_YELLOW=0.025
export NOSE_OPENSTACK_SHOW_ELAPSED=1
export NOSE_OPENSTACK_STDOUT=1
export LANG=en_US.UTF-8
export LANGUAGE=en_US:en
export LC_ALL=C
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
OS_STDOUT_NOCAPTURE=False
OS_STDERR_NOCAPTURE=False
for arg in "$@"; do
process_option $arg
done
# If enabled, tell nose to collect coverage data
if [ $coverage -eq 1 ]; then
noseopts="$noseopts --with-coverage --cover-package=nova"
fi
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
function init_testr {
if [ ! -d .testrepository ]; then
${wrapper} testr init
fi
}
function run_tests {
# Cleanup *pyc
${wrapper} find . -type f -name "*.pyc" -delete
if [ "$debug" -eq 0 ];
then
# Just run the test suites in current environment
if [ "$verbose" -eq 1 ];
then
${wrapper} $NOSETESTS 2>&1 | tee nosetests.log
else
${wrapper} $NOSETESTS | tee nosetests.log
fi
# If we get some short import error right away, print the error log directly
RESULT=$?
if [ "$RESULT" -ne "0" ];
then
ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
if [ "$ERRSIZE" -lt "40" ];
then
cat run_tests.log
fi
else
tests_run=$(awk '/^Ran/ {print $2}' nosetests.log)
if [ -z "$tests_run" ] || [ "$tests_run" -eq 0 ];
then
echo "ERROR: Zero tests ran, something is wrong!"
echo "This is usually caused by a parse error in some python"
echo "file or a failure to set up the environment (i.e. during"
echo "temporary database preparation). Running nosetests directly"
echo "may offer more clues."
return 1
fi
fi
else
${wrapper} $NOSETESTS --pdb
RESULT=$?
if [ $coverage -eq 1 ]; then
# Do not test test_coverage_ext when gathering coverage.
TESTRTESTS="$TESTRTESTS ^(?!.*test_coverage_ext).*$"
export PYTHON="${wrapper} coverage run --source nova --parallel-mode"
fi
# Just run the test suites in current environment
set +e
echo "Running \`${wrapper} $TESTRTESTS\`"
${wrapper} $TESTRTESTS
RESULT=$?
set -e
copy_subunit_log
return $RESULT
}
function copy_subunit_log {
LOGNAME=`cat .testrepository/next-stream`
LOGNAME=$(($LOGNAME - 1))
LOGNAME=".testrepository/${LOGNAME}"
cp $LOGNAME subunit.log
}
function run_pep8 {
echo "Running PEP8 and HACKING compliance check..."
@@ -155,7 +130,7 @@ function run_pep8 {
}
NOSETESTS="nosetests $noseopts $noseargs"
TESTRTESTS="testr run --parallel $testropts $testrargs"
if [ $never_venv -eq 0 ]
then
@@ -197,13 +172,14 @@ if [ $recreate_db -eq 1 ]; then
rm -f tests.sqlite
fi
init_testr
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (noseopts), which begin with a '-', and
# arguments (noseargs).
if [ -z "$noseargs" ]; then
# distinguish between options (testropts), which begin with a '-', and
# arguments (testrargs).
if [ -z "$testrargs" ]; then
if [ $no_pep8 -eq 0 ]; then
run_pep8
fi
@@ -212,5 +188,6 @@ fi
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
# Don't compute coverage for common code, which is tested elsewhere
${wrapper} coverage combine
${wrapper} coverage html --include='nova/*' --omit='nova/openstack/common/*' -d covhtml -i
fi
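run_tests.sh now also leaves the raw result stream of the last run in
subunit.log (see copy_subunit_log above). As a rough illustration, that
stream can be replayed later with python-subunit, for example to
re-print failures through a plain unittest runner (assuming the subunit
v1 stream format that testrepository wrote at the time):

    import unittest

    import subunit

    with open('subunit.log', 'rb') as stream:
        # ProtocolTestCase parses the stream and forwards each recorded
        # test event to the runner's result object.
        suite = subunit.ProtocolTestCase(stream)
        unittest.TextTestRunner(verbosity=2).run(suite)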


@@ -21,10 +21,3 @@ input_file = nova/locale/nova.pot
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = nova/locale/nova.pot
[nosetests]
verbosity=2
cover-package = nova
cover-html = true
cover-erase = true
where=nova/tests


@@ -196,9 +196,6 @@ def install_dependencies(venv=VENV):
pip_install('-r', PIP_REQUIRES)
pip_install('-r', TEST_REQUIRES)
# Install nova into the virtual_env. No more path munging!
run_command([os.path.join(venv, 'bin/python'), 'setup.py', 'develop'])
def post_process():
get_distro().post_process()


@@ -2,14 +2,14 @@
distribute>=0.6.24
coverage
fixtures
discover
feedparser
fixtures>=0.3.10
mox==0.5.3
nose
testtools
openstack.nose_plugin>=0.7
nosehtmloutput
MySQL-python
pep8==1.3.3
pylint==0.25.2
python-subunit
sphinx>=1.1.2
feedparser
MySQL-python
testrepository>=0.0.8
testtools>=0.9.22

tox.ini

@@ -3,19 +3,16 @@ envlist = py26,py27,pep8
[testenv]
setenv = VIRTUAL_ENV={envdir}
NOSE_WITH_OPENSTACK=1
NOSE_OPENSTACK_COLOR=1
NOSE_OPENSTACK_RED=0.05
NOSE_OPENSTACK_YELLOW=0.025
NOSE_OPENSTACK_SHOW_ELAPSED=1
NOSE_OPENSTACK_STDOUT=1
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
OS_STDOUT_NOCAPTURE=False
OS_STDERR_NOCAPTURE=False
deps = -r{toxinidir}/tools/pip-requires
-r{toxinidir}/tools/test-requires
commands = nosetests {posargs}
commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'
bash -c 'testr run --parallel {posargs} ; RET=$? ; echo "Slowest Tests" ; testr slowest && exit $RET'
[tox:jenkins]
sitepackages = True
@@ -40,7 +37,13 @@ deps = pyflakes
commands = python tools/flakes.py nova
[testenv:cover]
setenv = NOSE_WITH_COVERAGE=1
# Need to omit DynamicallyCompiledCheetahTemplate.py from coverage because
# it ceases to exist post test run. Also do not run test_coverage_ext tests
# while gathering coverage as those tests conflict with coverage.
setenv = OMIT=--omit=DynamicallyCompiledCheetahTemplate.py
PYTHON=coverage run --source nova --parallel-mode
commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'
bash -c 'testr run --parallel \^\(\?\!\.\*test_coverage_ext\)\.\*\$ ; RET=$? ; coverage combine ; coverage html -d ./cover $OMIT && exit $RET'
[testenv:venv]
commands = {posargs}