17563a802e
Neutron Manager is loaded at the very startup of the neutron server process and with it plugins are loaded and stored for lookup purposes as their references are widely used across the entire neutron codebase. Rather than holding these references directly in NeutronManager this patch refactors the code so that these references are held by a plugin directory. This allows subprojects and other parts of the Neutron codebase to use the directory in lieu of the manager. The result is a leaner, cleaner, and more decoupled code. Usage pattern [1,2] can be translated to [3,4] respectively. [1] manager.NeutronManager.get_service_plugins()[FOO] [2] manager.NeutronManager.get_plugin() [3] directory.get_plugin(FOO) [4] directory.get_plugin() The more entangled part is in the neutron unit tests, where the use of the manager can be simplified as mocking is typically replaced by a call to the directory add_plugin() method. This is safe as each test case gets its own copy of the plugin directory. That said, unit tests that look more like API tests and that rely on the entire plugin machinery, need some tweaking to avoid stumbling into plugin loading failures. Due to the massive use of the manager, deprecation warnings are considered impractical as they cause logs to bloat out of proportion. Follow-up patches that show how to adopt the directory in neutron subprojects are tagged with topic:plugin-directory. NeutronLibImpact Partially-implements: blueprint neutron-lib Change-Id: I7331e914234c5f0b7abe836604fdd7e4067551cf
502 lines
18 KiB
Python
502 lines
18 KiB
Python
# Copyright 2010-2011 OpenStack Foundation
|
|
# All Rights Reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
# not use this file except in compliance with the License. You may obtain
|
|
# a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
# License for the specific language governing permissions and limitations
|
|
# under the License.
|
|
|
|
"""Base test cases for all neutron tests.
|
|
"""
|
|
|
|
import abc
|
|
import contextlib
|
|
import functools
|
|
import gc
|
|
import inspect
|
|
import os
|
|
import os.path
|
|
import sys
|
|
import weakref
|
|
|
|
from debtcollector import moves
|
|
import eventlet.timeout
|
|
import fixtures
|
|
import mock
|
|
from neutron_lib.plugins import directory
|
|
from oslo_concurrency.fixture import lockutils
|
|
from oslo_config import cfg
|
|
from oslo_messaging import conffixture as messaging_conffixture
|
|
from oslo_utils import excutils
|
|
from oslo_utils import fileutils
|
|
from oslo_utils import strutils
|
|
from oslotest import base
|
|
import six
|
|
import testtools
|
|
|
|
from neutron._i18n import _
|
|
from neutron.agent.linux import external_process
|
|
from neutron.api.rpc.callbacks.consumer import registry as rpc_consumer_reg
|
|
from neutron.callbacks import manager as registry_manager
|
|
from neutron.callbacks import registry
|
|
from neutron.common import config
|
|
from neutron.common import rpc as n_rpc
|
|
from neutron.common import utils
|
|
from neutron.db import agentschedulers_db
|
|
from neutron import manager
|
|
from neutron import policy
|
|
from neutron.quota import resource_registry
|
|
from neutron.tests import fake_notifier
|
|
from neutron.tests import post_mortem_debug
|
|
from neutron.tests import tools
|
|
|
|
|
|
# Global oslo.config object shared by all tests; 'state_path' lives in
# neutron.conf.common and must be imported before tests override it.
CONF = cfg.CONF
CONF.import_opt('state_path', 'neutron.conf.common')

# Directory containing this module, and the test 'etc' config dir under it.
ROOTDIR = os.path.dirname(__file__)
ETCDIR = os.path.join(ROOTDIR, 'etc')
|
|
|
|
|
|
def etcdir(*p):
    """Build an absolute path under the test 'etc' configuration directory."""
    parts = (ETCDIR,) + p
    return os.path.join(*parts)
|
|
|
|
|
|
def fake_use_fatal_exceptions(*args):
    """Monkey-patch target: make NeutronException always report fatal mode."""
    return True
|
|
|
|
|
|
# Backward-compatible aliases for helpers that moved to neutron.common.utils.
# debtcollector emits a deprecation warning pointing callers at the new home;
# scheduled for removal in Ocata.
for _name in ('get_related_rand_names',
              'get_rand_name',
              'get_rand_device_name',
              'get_related_rand_device_names'):
    setattr(sys.modules[__name__], _name, moves.moved_function(
        getattr(utils, _name), _name, __name__,
        message='use "neutron.common.utils.%s" instead' % _name,
        version='Newton', removal_version='Ocata'))
|
|
|
|
|
|
def bool_from_env(key, strict=False, default=False):
    """Interpret the environment variable *key* as a boolean.

    Parsing is delegated to oslo's strutils.bool_from_string; an unset
    variable yields *default*.
    """
    return strutils.bool_from_string(
        os.environ.get(key), strict=strict, default=default)
|
|
|
|
|
|
def setup_test_logging(config_opts, log_dir, log_file_path_template):
    """Route each test's log output into its own file under *log_dir*.

    Turns on debug logging, makes sure *log_dir* exists, points the
    'log_file' option at a shell-friendly path built from the template,
    silences stderr output and (re)initializes oslo logging.
    """
    config_opts.set_override('debug', True)
    fileutils.ensure_tree(log_dir, mode=0o755)
    full_path = os.path.join(log_dir, log_file_path_template)
    config_opts.set_override('log_file', sanitize_log_path(full_path))
    config_opts.set_override('use_stderr', False)
    config.setup_logging()
|
|
|
|
|
|
def sanitize_log_path(path):
    """Return *path* with shell-unfriendly characters replaced.

    Spaces become dashes and parentheses become underscores so that the
    resulting log file path can be passed safely to a shell.
    """
    # dict.items() iterates fine on both py2 and py3 for a 3-entry map;
    # six.iteritems adds no value here, so the six dependency is dropped.
    replace_map = {' ': '-', '(': '_', ')': '_'}
    for char, replacement in replace_map.items():
        path = path.replace(char, replacement)
    return path
|
|
|
|
|
|
class AttributeDict(dict):
    """Dictionary whose keys are also reachable as attributes (dict.key)."""

    def __getattr__(self, name):
        """Resolve *name* as a key; raise AttributeError when absent."""
        try:
            return self[name]
        except KeyError:
            raise AttributeError(_("Unknown attribute '%s'.") % name)
|
|
|
|
|
|
def _catch_timeout(f):
|
|
@functools.wraps(f)
|
|
def func(self, *args, **kwargs):
|
|
try:
|
|
return f(self, *args, **kwargs)
|
|
except eventlet.timeout.Timeout as e:
|
|
self.fail('Execution of this test timed out: %s' % e)
|
|
return func
|
|
|
|
|
|
class _CatchTimeoutMetaclass(abc.ABCMeta):
    """Metaclass wrapping every test_* method with _catch_timeout.

    Applied (via six.add_metaclass) to test base classes so that an
    eventlet Timeout raised inside a test turns into a regular unittest
    failure instead of killing the whole test worker.
    """
    def __init__(cls, name, bases, dct):
        super(_CatchTimeoutMetaclass, cls).__init__(name, bases, dct)
        # Wrap at class-creation time so subclasses get it automatically.
        for name, method in inspect.getmembers(
                # NOTE(ihrachys): we should use isroutine because it will catch
                # both unbound methods (python2) and functions (python3)
                cls, predicate=inspect.isroutine):
            if name.startswith('test_'):
                setattr(cls, name, _catch_timeout(method))
|
|
|
|
|
|
# Test worker cannot survive eventlet's Timeout exception, which effectively
# kills the whole worker, with all test cases scheduled to it. This metaclass
# makes all test cases convert Timeout exceptions into unittest friendly
# failure mode (self.fail).
@six.add_metaclass(_CatchTimeoutMetaclass)
class DietTestCase(base.BaseTestCase):
    """Same great taste, less filling.

    BaseTestCase is responsible for doing lots of plugin-centric setup
    that not all tests require (or can tolerate). This class provides
    only functionality that is common across all tests.
    """

    def setUp(self):
        super(DietTestCase, self).setUp()

        # FIXME(amuller): this must be called in the Neutron unit tests base
        # class to initialize the DB connection string. Moving this may cause
        # non-deterministic failures. Bug #1489098 for more info.
        config.set_db_defaults()

        # Configure this first to ensure pm debugging support for setUp()
        debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
        if debugger:
            self.addOnException(post_mortem_debug.get_exception_handler(
                debugger))

        # Make sure we see all relevant deprecation warnings when running tests
        self.useFixture(tools.WarningsFixture())

        # NOTE(ihrachys): oslotest already sets stopall for cleanup, but it
        # does it using six.moves.mock (the library was moved into
        # unittest.mock in Python 3.4). So until we switch to six.moves.mock
        # everywhere in unit tests, we can't remove this setup. The base class
        # is used in 3party projects, so we would need to switch all of them to
        # six before removing the cleanup callback from here.
        self.addCleanup(mock.patch.stopall)

        self.addOnException(self.check_for_systemexit)
        # Remember the test process pid so check_for_systemexit can tell a
        # child process apart from the main test process.
        self.orig_pid = os.getpid()

        tools.reset_random_seed()

    def addOnException(self, handler):
        """Register *handler*, shielded so its own errors can't mask failures."""

        def safe_handler(*args, **kwargs):
            try:
                return handler(*args, **kwargs)
            except Exception:
                # Attach the handler's traceback as test detail instead of
                # letting it propagate and hide the original test failure.
                with excutils.save_and_reraise_exception(reraise=False) as ctx:
                    self.addDetail('failure in exception handler %s' % handler,
                                   testtools.content.TracebackContent(
                                       (ctx.type_, ctx.value, ctx.tb), self))

        return super(DietTestCase, self).addOnException(safe_handler)

    def check_for_systemexit(self, exc_info):
        """Turn an unexpected SystemExit in the test process into a failure."""
        if isinstance(exc_info[1], SystemExit):
            if os.getpid() != self.orig_pid:
                # Subprocess - let it just exit
                raise
            # This makes sys.exit(0) still a failure
            self.force_failure = True

    @contextlib.contextmanager
    def assert_max_execution_time(self, max_execution_time=5):
        """Fail the test when the wrapped body exceeds the time limit."""
        with eventlet.timeout.Timeout(max_execution_time, False):
            yield
            return
        # Only reached when the Timeout fired (non-raising mode above).
        self.fail('Execution of this test timed out')

    def assertOrderedEqual(self, expected, actual):
        """Compare two structures, ignoring the order of nested list values."""
        expect_val = self.sort_dict_lists(expected)
        actual_val = self.sort_dict_lists(actual)
        self.assertEqual(expect_val, actual_val)

    def sort_dict_lists(self, dic):
        """Recursively sort list values in *dic* (mutates and returns it)."""
        for key, value in six.iteritems(dic):
            if isinstance(value, list):
                dic[key] = sorted(value)
            elif isinstance(value, dict):
                dic[key] = self.sort_dict_lists(value)
        return dic

    def assertDictSupersetOf(self, expected_subset, actual_superset):
        """Checks that actual dict contains the expected dict.

        After checking that the arguments are of the right type, this checks
        that each item in expected_subset is in, and matches, what is in
        actual_superset. Separate tests are done, so that detailed info can
        be reported upon failure.
        """
        if not isinstance(expected_subset, dict):
            self.fail("expected_subset (%s) is not an instance of dict" %
                      type(expected_subset))
        if not isinstance(actual_superset, dict):
            self.fail("actual_superset (%s) is not an instance of dict" %
                      type(actual_superset))
        for k, v in expected_subset.items():
            self.assertIn(k, actual_superset)
            self.assertEqual(v, actual_superset[k],
                             "Key %(key)s expected: %(exp)r, actual %(act)r" %
                             {'key': k, 'exp': v, 'act': actual_superset[k]})
|
|
|
|
|
|
class ProcessMonitorFixture(fixtures.Fixture):
    """Test fixture to capture and cleanup any spawn process monitor."""

    def _setUp(self):
        # Keep the real thread spawner so record_calls can delegate to it
        # while tracking every monitor instance created during the test.
        self.old_callable = (
            external_process.ProcessMonitor._spawn_checking_thread)
        p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor."
                       "_spawn_checking_thread",
                       new=lambda x: self.record_calls(x))
        p.start()
        # Monitors observed while the fixture is active; all stopped on cleanup.
        self.instances = []
        self.addCleanup(self.stop)

    def stop(self):
        # Stop every ProcessMonitor spawned while the fixture was active.
        for instance in self.instances:
            instance.stop()

    def record_calls(self, instance):
        # Delegate to the original spawner, then remember the instance so
        # stop() can clean it up later.
        self.old_callable(instance)
        self.instances.append(instance)
|
|
|
|
|
|
class BaseTestCase(DietTestCase):
    """Full Neutron test base: config, RPC mocks, registry and directory."""

    @staticmethod
    def config_parse(conf=None, args=None):
        """Create the default configurations."""
        # neutron.conf includes rpc_backend which needs to be cleaned up
        if args is None:
            args = []
        args += ['--config-file', etcdir('neutron.conf')]
        if conf is None:
            config.init(args=args)
        else:
            conf(args)

    def setUp(self):
        super(BaseTestCase, self).setUp()

        # Avoid cross-test interference on file-based external locks.
        self.useFixture(lockutils.ExternalLockFixture())

        # Point state_path at a per-test temp dir so tests never touch a
        # shared state directory.
        cfg.CONF.set_override('state_path', self.get_default_temp_dir().path)

        self.addCleanup(CONF.reset)
        self.useFixture(ProcessMonitorFixture())

        # Force the fast-fail exception behavior for all neutron exceptions.
        self.useFixture(fixtures.MonkeyPatch(
            'neutron_lib.exceptions.NeutronException.use_fatal_exceptions',
            fake_use_fatal_exceptions))

        # Keep tests hermetic: never pick up config files from the host.
        self.useFixture(fixtures.MonkeyPatch(
            'oslo_config.cfg.find_config_files',
            lambda project=None, prog=None, extension=None: []))

        self.setup_rpc_mocks()
        self.setup_config()
        # Each test gets private copies of the callback registry and the
        # plugin directory so plugins registered by one test never leak.
        self.setup_test_registry_instance()
        self.setup_test_directory_instance()

        policy.init()
        self.addCleanup(policy.reset)
        self.addCleanup(resource_registry.unregister_all_resources)
        self.addCleanup(rpc_consumer_reg.clear)

    def get_new_temp_dir(self):
        """Create a new temporary directory.

        :returns fixtures.TempDir
        """
        return self.useFixture(fixtures.TempDir())

    def get_default_temp_dir(self):
        """Create a default temporary directory.

        Returns the same directory during the whole test case.

        :returns fixtures.TempDir
        """
        if not hasattr(self, '_temp_dir'):
            self._temp_dir = self.get_new_temp_dir()
        return self._temp_dir

    def get_temp_file_path(self, filename, root=None):
        """Returns an absolute path for a temporary file.

        If root is None, the file is created in default temporary directory. It
        also creates the directory if it's not initialized yet.

        If root is not None, the file is created inside the directory passed as
        root= argument.

        :param filename: filename
        :type filename: string
        :param root: temporary directory to create a new file in
        :type root: fixtures.TempDir
        :returns absolute file path string
        """
        root = root or self.get_default_temp_dir()
        return root.join(filename)

    def setup_rpc_mocks(self):
        """Replace real RPC machinery with in-memory fakes for the test."""
        # don't actually start RPC listeners when testing
        mock.patch(
            'neutron.common.rpc.Connection.consume_in_threads',
            return_value=[]).start()

        self.useFixture(fixtures.MonkeyPatch(
            'oslo_messaging.Notifier', fake_notifier.FakeNotifier))

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        # NOTE(russellb) We want all calls to return immediately.
        self.messaging_conf.response_timeout = 0
        self.useFixture(self.messaging_conf)

        self.addCleanup(n_rpc.clear_extra_exmods)
        n_rpc.add_extra_exmods('neutron.test')

        self.addCleanup(n_rpc.cleanup)
        n_rpc.init(CONF)

    def setup_test_registry_instance(self):
        """Give a private copy of the registry to each test."""
        self._callback_manager = registry_manager.CallbacksManager()
        mock.patch.object(registry, '_get_callback_manager',
                          return_value=self._callback_manager).start()

    def setup_test_directory_instance(self):
        """Give a private copy of the directory to each test."""
        # TODO(armax): switch to using a fixture to stop relying on stubbing
        # out _get_plugin_directory directly.
        self._plugin_directory = directory._PluginDirectory()
        mock.patch.object(directory, '_get_plugin_directory',
                          return_value=self._plugin_directory).start()

    def setup_config(self, args=None):
        """Tests that need a non-default config can override this method."""
        self.config_parse(args=args)

    def config(self, **kw):
        """Override some configuration values.

        The keyword arguments are the names of configuration options to
        override and their values.

        If a group argument is supplied, the overrides are applied to
        the specified configuration option group.

        All overrides are automatically cleared at the end of the current
        test by the fixtures cleanup process.
        """
        group = kw.pop('group', None)
        for k, v in six.iteritems(kw):
            CONF.set_override(k, v, group)

    def setup_coreplugin(self, core_plugin=None, load_plugins=True):
        """Install PluginFixture and, optionally, load the plugin machinery."""
        cp = PluginFixture(core_plugin)
        self.useFixture(cp)
        # Expose the fixture's patches so tests can inspect the mocks.
        self.patched_dhcp_periodic = cp.patched_dhcp_periodic
        self.patched_default_svc_plugins = cp.patched_default_svc_plugins
        if load_plugins:
            manager.init()

    def setup_notification_driver(self, notification_driver=None):
        """Route notifications to the in-memory fake notifier for this test."""
        self.addCleanup(fake_notifier.reset)
        if notification_driver is None:
            notification_driver = [fake_notifier.__name__]
        cfg.CONF.set_override("notification_driver", notification_driver)
|
|
|
|
|
|
class PluginFixture(fixtures.Fixture):
    """Fixture that configures a core plugin and guarantees its teardown."""

    def __init__(self, core_plugin=None):
        super(PluginFixture, self).__init__()
        # Dotted path of the core plugin to use; None keeps the configured one.
        self.core_plugin = core_plugin

    def _setUp(self):
        # Do not load default service plugins in the testing framework
        # as all the mocking involved can cause havoc.
        self.default_svc_plugins_p = mock.patch(
            'neutron.manager.NeutronManager._get_default_service_plugins')
        self.patched_default_svc_plugins = self.default_svc_plugins_p.start()
        # Disable periodic DHCP agent status checks and the agent health
        # check worker: they spawn background work tests must not trigger.
        self.dhcp_periodic_p = mock.patch(
            'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
            'add_periodic_dhcp_agent_status_check')
        self.patched_dhcp_periodic = self.dhcp_periodic_p.start()
        self.agent_health_check_p = mock.patch(
            'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
            'add_agent_status_check_worker')
        self.agent_health_check = self.agent_health_check_p.start()
        # Plugin cleanup should be triggered last so that
        # test-specific cleanup has a chance to release references.
        self.addCleanup(self.cleanup_core_plugin)
        if self.core_plugin is not None:
            cfg.CONF.set_override('core_plugin', self.core_plugin)

    def cleanup_core_plugin(self):
        """Ensure that the core plugin is deallocated."""
        nm = manager.NeutronManager
        if not nm.has_instance():
            return

        # TODO(marun) Fix plugins that do not properly initialize notifiers
        agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}

        # Perform a check for deallocation only if explicitly
        # configured to do so since calling gc.collect() after every
        # test increases test suite execution time by ~50%.
        check_plugin_deallocation = (
            bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION'))
        if check_plugin_deallocation:
            # Hold only a weak reference so the plugin can actually be freed.
            plugin = weakref.ref(nm._instance.plugin)

        nm.clear_instance()

        if check_plugin_deallocation:
            gc.collect()

            # TODO(marun) Ensure that mocks are deallocated?
            if plugin() and not isinstance(plugin(), mock.Base):
                raise AssertionError(
                    'The plugin for this test was not deallocated.')
|
|
|
|
|
|
class Timeout(fixtures.Fixture):
    """Setup per test timeouts.

    In order to avoid test deadlocks we support setting up a test
    timeout parameter read from the environment. In almost all
    cases where the timeout is reached this means a deadlock.

    A scaling factor allows extremely long tests to specify they
    need more time.
    """

    def __init__(self, timeout=None, scaling=1):
        super(Timeout, self).__init__()
        raw = os.environ.get('OS_TEST_TIMEOUT', 0) if timeout is None \
            else timeout
        try:
            seconds = int(raw)
        except ValueError:
            # An unparsable value means "no timeout at all".
            seconds = 0
        self.test_timeout = seconds
        if scaling < 1:
            raise ValueError('scaling value must be >= 1')
        self.test_timeout *= scaling

    def setUp(self):
        super(Timeout, self).setUp()
        # A non-positive timeout disables the fixture entirely.
        if self.test_timeout > 0:
            self.useFixture(fixtures.Timeout(self.test_timeout, gentle=True))
|