2012-02-04 17:40:31 -06:00
|
|
|
# Copyright 2012 United States Government as represented by the
|
2011-10-31 11:31:05 -07:00
|
|
|
# Administrator of the National Aeronautics and Space Administration.
|
|
|
|
# All Rights Reserved.
|
|
|
|
#
|
2012-02-04 17:40:31 -06:00
|
|
|
# Copyright 2012 Nebula, Inc.
|
2011-10-31 11:31:05 -07:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
|
|
# not use this file except in compliance with the License. You may obtain
|
|
|
|
# a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
# License for the specific language governing permissions and limitations
|
|
|
|
# under the License.
|
|
|
|
|
2017-03-17 19:15:23 +00:00
|
|
|
from functools import wraps
|
2020-01-09 16:49:18 +02:00
|
|
|
import importlib
|
2017-10-06 16:34:01 +03:00
|
|
|
import logging
|
2012-05-09 17:37:18 -07:00
|
|
|
import os
|
2016-03-16 10:25:26 -06:00
|
|
|
import traceback
|
2020-03-26 19:45:37 +02:00
|
|
|
from unittest import mock
|
2015-02-24 15:16:52 -05:00
|
|
|
|
2014-01-03 17:31:49 +01:00
|
|
|
from django.conf import settings
|
2021-02-24 23:32:07 +09:00
|
|
|
from django.contrib.messages.storage import cookie as cookie_storage
|
2017-03-17 19:15:23 +00:00
|
|
|
from django.contrib.messages.storage import default_storage
|
2012-01-27 13:08:37 -08:00
|
|
|
from django.core.handlers import wsgi
|
2017-03-17 19:15:23 +00:00
|
|
|
from django.test.client import RequestFactory
|
2018-02-14 14:46:23 +02:00
|
|
|
from django.test import tag
|
2021-02-24 21:36:07 +09:00
|
|
|
from django.test import testcases
|
2017-12-12 13:30:33 +09:00
|
|
|
from django import urls
|
2016-02-11 14:45:32 +00:00
|
|
|
from django.utils import http
|
2016-01-15 17:42:47 +08:00
|
|
|
|
2014-07-29 16:57:39 +02:00
|
|
|
from openstack_auth import user
|
|
|
|
from openstack_auth import utils
|
2016-03-16 10:25:26 -06:00
|
|
|
from requests.packages.urllib3.connection import HTTPConnection
|
2013-10-17 08:50:32 -05:00
|
|
|
|
2014-03-15 00:50:18 -07:00
|
|
|
from horizon import base
|
|
|
|
from horizon import conf
|
2021-02-24 23:32:07 +09:00
|
|
|
from horizon import exceptions
|
2013-06-10 14:29:44 +02:00
|
|
|
from horizon.test import helpers as horizon_helpers
|
2012-10-04 15:43:40 -07:00
|
|
|
from openstack_dashboard import api
|
|
|
|
from openstack_dashboard import context_processors
|
2013-08-23 17:26:48 +04:00
|
|
|
from openstack_dashboard.test.test_data import utils as test_utils
|
2011-10-31 11:31:05 -07:00
|
|
|
|
|
|
|
|
2017-10-06 16:34:01 +03:00
|
|
|
LOG = logging.getLogger(__name__)


# Makes output of failing tests much easier to read.
# NOTE: this is a process-global monkey-patch on Django's WSGIRequest.
wsgi.WSGIRequest.__repr__ = lambda self: "<class 'django.http.HttpRequest'>"


# Shortcuts to avoid importing horizon_helpers and for backward compatibility.
update_settings = horizon_helpers.update_settings
IsA = horizon_helpers.IsA
IsHttpRequest = horizon_helpers.IsHttpRequest
|
2018-02-02 02:33:23 +09:00
|
|
|
|
2012-01-27 13:08:37 -08:00
|
|
|
|
test: Ensure to stop mock when create_mocks decorator exits
During reviewing https://review.opendev.org/c/openstack/horizon/+/772603,
we noticed that a method decorated by create_mocks is called multiple time
in a single test. Previously create_mocks decorator does not stop mocking,
so this means that multiple active mock can exist for one method.
In general, it is not a good idea to mock a method multiple times at the
same time.
To cope with this situation, this commit ensures for create_mocks decorator
to stop active mocks when exiting the decorator.
This works for most cases, but it does not work only when mocking and
assertions are handled by separate methods and test logic is placed between
them. To cope with this, "stop_mock" optional argument is introduced.
FYI: Details on why "stop_mock" is needed.
I explored various ways but could not find a good way so far.
create_mocks needs to be a decorator as it needs to refer an object
reference (self). On the other hand, if we would like to merge the logic of
mocking and assertions (for example, _stub_api_calls() and _check_api_calls()
in openstack_dashboard/dashboards/project/overview/tests.py), a context
manager would be good as we would like to call it inside a class.
However, we cannot mix a decorator and a context manger as a decorator is
executed when a context manager is iniitialized and stopping mock in
the create_mocks decorator is done during the initialization of the
context manager and methods are not mocked when test code is run.
Change-Id: I9e37dc1eaa08adf36d11975fed6f5a0a90cdde52
2021-02-26 18:29:02 +09:00
|
|
|
def create_mocks(target_methods, stop_mock=True):
    """decorator to simplify setting up multiple mocks at once

    :param target_methods: a dict to define methods to be patched using mock.

        A key of "target_methods" is a target object whose attribute(s) are
        patched.

        A value of "target_methods" is a list of methods to be patched
        using mock. Each element of the list can be a string or a tuple
        consisting of two strings.

        A string specifies a method name of "target" object to be mocked.
        The decorator create a mock object for the method and the started mock
        can be accessed via 'mock_<method-name>' of the test class.
        For example, in case of::

            @create_mocks({api.nova: ['server_list',
                                      'flavor_list']})
            def test_example(self):
                ...
                self.mock_server_list.return_value = ...
                self.mock_flavar_list.side_effect = ...

        you can access the mocked method via "self.mock_server_list"
        inside a test class.

        The tuple version is useful when there are multiple methods with
        a same name are mocked in a single test.
        The format of the tuple is::

            ("<method-name-to-be-mocked>", "<attr-name>")

        The decorator create a mock object for "<method-name-to-be-mocked>"
        and the started mock can be accessed via 'mock_<attr-name>' of
        the test class.

        Example::

            @create_mocks({
                api.nova: [
                    'usage_get',
                    ('tenant_absolute_limits', 'nova_tenant_absolute_limits'),
                ],
                api.cinder: [
                    ('tenant_absolute_limits',
                     'cinder_tenant_absolute_limits'),
                ],
            })
            def test_example(self):
                ...
                self.mock_usage_get.return_value = ...
                self.mock_nova_tenant_absolute_limits.return_value = ...
                self.mock_cinder_tenant_absolute_limits.return_value = ...
                ...

    :param stop_mock: If True (default), mocks started in this decorator will
        be stopped. Set this to False only if you cannot stop mocks when
        exiting this decorator. The default value, True, should work for
        most cases.
    """
    def wrapper(function):
        @wraps(function)
        def wrapped(inst, *args, **kwargs):
            patchers = []
            for target, methods in target_methods.items():
                for method in methods:
                    # A plain string mocks "method" and exposes it as
                    # "mock_<method>"; a 2-tuple allows a custom attribute
                    # name so same-named methods on different targets do
                    # not collide.
                    if isinstance(method, str):
                        method_mocked = method
                        attr_name = method
                    else:
                        method_mocked = method[0]
                        attr_name = method[1]
                    m = mock.patch.object(target, method_mocked)
                    patchers.append(m)
                    setattr(inst, 'mock_%s' % attr_name, m.start())
            # Bug fix: previously the started mocks were stopped only when
            # the decorated function returned normally, so a failing test
            # (raised exception) leaked active mocks. Use try/finally so
            # mocks are stopped on both success and failure.
            try:
                retval = function(inst, *args, **kwargs)
            finally:
                if stop_mock:
                    for p in patchers:
                        p.stop()
            return retval
        return wrapped
    return wrapper
|
|
|
|
|
|
|
|
|
2017-04-29 11:26:45 +00:00
|
|
|
def _apply_panel_mocks(patchers=None):
    """Global mocks on panels that get called on all views.

    Starts one ``mock.patch`` per entry of
    ``settings.TEST_GLOBAL_MOCKS_ON_PANELS`` and records the started
    patchers in (and returns) the ``patchers`` dict keyed by entry name.
    """
    if patchers is None:
        patchers = {}
    for name, config in settings.TEST_GLOBAL_MOCKS_ON_PANELS.items():
        # Only forward the mock parameters actually present in the config.
        params = {key: config[key]
                  for key in ('return_value', 'side_effect')
                  if key in config}
        patcher = mock.patch(config['method'], **params)
        patcher.start()
        patchers[name] = patcher
    return patchers
|
|
|
|
|
|
|
|
|
2012-01-09 14:44:53 -08:00
|
|
|
class RequestFactoryWithMessages(RequestFactory):
    """``RequestFactory`` whose requests support ``contrib.messages``.

    Generated requests get a user, a minimal session and message storage
    attached so views using the Django messages framework can run.
    """

    @staticmethod
    def _add_messages(req):
        # Shared post-processing for every generated request.
        req.user = utils.get_user(req)
        req.session = []
        req._messages = default_storage(req)
        return req

    def get(self, *args, **kwargs):
        return self._add_messages(super().get(*args, **kwargs))

    def post(self, *args, **kwargs):
        return self._add_messages(super().post(*args, **kwargs))
|
|
|
|
|
|
|
|
|
2012-10-04 15:43:40 -07:00
|
|
|
class TestCase(horizon_helpers.TestCase):
    """Specialized base test case class for Horizon.

    It gives access to numerous additional features:

    * A full suite of test data through various attached objects and
      managers (e.g. ``self.servers``, ``self.user``, etc.). See the
      docs for
      :class:`~openstack_dashboard.test.test_data.utils.TestData`
      for more information.
    * A set of request context data via ``self.context``.
    * A ``RequestFactory`` class which supports Django's ``contrib.messages``
      framework via ``self.factory``.
    * A ready-to-go request object via ``self.request``.
    * The ability to override specific time data controls for easier testing.
    * Several handy additional assertion methods.
    """

    # To force test failures when unmocked API calls are attempted, provide
    # boolean variable to store failures
    missing_mocks = False

    def fake_conn_request(self):
        # Installed over HTTPConnection.connect in setUp; any call means an
        # API request escaped the test's mocks.
        # print a stacktrace to illustrate where the unmocked API call
        # is being made from
        traceback.print_stack()
        # forcing a test failure for missing mock
        self.missing_mocks = True

    def setUp(self):
        # Save and replace module-level collaborators; each replacement is
        # undone in tearDown in the same order it is listed here.
        self._real_conn_request = HTTPConnection.connect
        HTTPConnection.connect = self.fake_conn_request

        self._real_context_processor = context_processors.openstack
        context_processors.openstack = lambda request: self.context

        self.patchers = _apply_panel_mocks()

        super().setUp()

    def _setup_test_data(self):
        super()._setup_test_data()
        test_utils.load_test_data(self)
        # Served to templates via the patched context processor (see setUp).
        self.context = {
            'authorized_tenants': self.tenants.list(),
            'JS_CATALOG': context_processors.get_js_catalog(settings),
        }

    def _setup_factory(self):
        # For some magical reason we need a copy of this here.
        self.factory = RequestFactoryWithMessages()

    def _setup_user(self, **kwargs):
        self._real_get_user = utils.get_user
        tenants = self.context['authorized_tenants']
        # Defaults come from the loaded test data; callers may override any
        # of them via kwargs.
        base_kwargs = {
            'id': self.user.id,
            'token': self.token,
            'username': self.user.name,
            'domain_id': self.domain.id,
            'user_domain_name': self.domain.name,
            'tenant_id': self.tenant.id,
            'service_catalog': self.service_catalog,
            'authorized_tenants': tenants
        }
        base_kwargs.update(kwargs)
        self.setActiveUser(**base_kwargs)

    def _setup_request(self):
        super()._setup_request()
        self.request.session['token'] = self.token.id

    def tearDown(self):
        # Restore everything monkey-patched in setUp/_setup_user.
        HTTPConnection.connect = self._real_conn_request
        context_processors.openstack = self._real_context_processor
        utils.get_user = self._real_get_user
        mock.patch.stopall()
        super().tearDown()

        # cause a test failure if an unmocked API call was attempted
        if self.missing_mocks:
            raise AssertionError("An unmocked API call was made.")

    def setActiveUser(self, id=None, token=None, username=None, tenant_id=None,
                      service_catalog=None, tenant_name=None, roles=None,
                      authorized_tenants=None, enabled=True, domain_id=None,
                      user_domain_name=None):
        # Replaces utils.get_user globally; restored in tearDown via
        # self._real_get_user saved in _setup_user.
        def get_user(request):
            ret = user.User(
                id=id,
                token=token,
                user=username,
                domain_id=domain_id,
                user_domain_name=user_domain_name,
                tenant_id=tenant_id,
                tenant_name=tenant_name,
                service_catalog=service_catalog,
                roles=roles,
                enabled=enabled,
                authorized_tenants=authorized_tenants,
                endpoint=settings.OPENSTACK_KEYSTONE_URL,
            )
            ret._is_system_user = False
            return ret
        utils.get_user = get_user

    def assertRedirectsNoFollow(self, response, expected_url):
        """Check for redirect.

        Asserts that the given response issued a 302 redirect without
        processing the view which is redirected to.
        """
        if response.has_header('location'):
            loc = response['location']
        else:
            loc = ''
        # Compare the unquoted forms so percent-encoding differences
        # do not cause false failures.
        loc = http.urlunquote(loc)
        expected_url = http.urlunquote(expected_url)
        self.assertEqual(loc, expected_url)
        self.assertEqual(response.status_code, 302)

    def assertNoFormErrors(self, response, context_name="form"):
        """Checks for no form errors.

        Asserts that the response either does not contain a form in its
        context, or that if it does, that form has no errors.
        """
        context = getattr(response, "context", {})
        if not context or context_name not in context:
            return True
        errors = response.context[context_name]._errors
        assert len(errors) == 0, \
            "Unexpected errors were found on the form: %s" % errors

    def assertFormErrors(self, response, count=0, message=None,
                         context_name="form"):
        """Check for form errors.

        Asserts that the response does contain a form in its
        context, and that form has errors, if count were given,
        it must match the exact numbers of errors
        """
        context = getattr(response, "context", {})
        assert (context and context_name in context), \
            "The response did not contain a form."
        errors = response.context[context_name]._errors
        if count:
            assert len(errors) == count, \
                "%d errors were found on the form, %d expected" % \
                (len(errors), count)
            if message:
                # Parse both sides as HTML so markup differences do not
                # break the containment check.
                text = testcases.assert_and_parse_html(
                    self, message, None, '"message" contains invalid HTML:')
                content = testcases.assert_and_parse_html(
                    self, str(errors), None,
                    '"_errors" in the response context is not valid HTML:')
                match_count = content.count(text)
                self.assertGreaterEqual(match_count, 1)
        else:
            assert len(errors) > 0, "No errors were found on the form"

    def assertStatusCode(self, response, expected_code):
        """Validates an expected status code.

        Matches camel case of other assert functions
        """
        if response.status_code == expected_code:
            return
        self.fail('status code %r != %r: %s' % (response.status_code,
                                                expected_code,
                                                response.content))

    def assertItemsCollectionEqual(self, response, items_list):
        # Compares a REST-style JSON response body of the form
        # {"items": [...]} against the expected item list.
        self.assertEqual(response.json, {"items": items_list})

    def getAndAssertTableRowAction(self, response, table_name,
                                   action_name, row_id):
        # Returns the single row action named action_name for the row with
        # id row_id; fails if the row or action is missing or ambiguous.
        table = response.context[table_name + '_table']
        rows = list(filter(lambda x: x.id == row_id, table.data))
        self.assertEqual(1, len(rows),
                         "Did not find a row matching id '%s'" % row_id)
        row_actions = table.get_row_actions(rows[0])
        actions = list(filter(lambda x: x.name == action_name, row_actions))

        msg_args = (action_name, table_name, row_id)
        self.assertGreater(
            len(actions), 0,
            "No action named '%s' found in '%s' table for id '%s'" % msg_args)

        self.assertEqual(
            1, len(actions),
            "Multiple actions named '%s' found in '%s' table for id '%s'"
            % msg_args)

        return actions[0]

    def getAndAssertTableAction(self, response, table_name, action_name):
        # Returns the single table-level action named action_name; fails if
        # it is missing or ambiguous.
        table = response.context[table_name + '_table']
        table_actions = table.get_table_actions()
        actions = list(filter(lambda x: x.name == action_name, table_actions))
        msg_args = (action_name, table_name)
        self.assertGreater(
            len(actions), 0,
            "No action named '%s' found in '%s' table" % msg_args)

        self.assertEqual(
            1, len(actions),
            "More than one action named '%s' found in '%s' table" % msg_args)

        return actions[0]

    @staticmethod
    def mock_rest_request(**args):
        # Builds a Mock request preconfigured for the REST API views:
        # authenticated AJAX request passing policy checks with an empty
        # body. Any of the defaults can be overridden via **args.
        mock_args = {
            'user.is_authenticated': True,
            'is_ajax.return_value': True,
            'policy.check.return_value': True,
            'body': ''
        }
        mock_args.update(args)
        return mock.Mock(**mock_args)

    def assert_mock_multiple_calls_with_same_arguments(
            self, mocked_method, count, expected_call):
        # Asserts the mock was called exactly `count` times, each time with
        # the arguments of `expected_call`.
        self.assertEqual(count, mocked_method.call_count)
        mocked_method.assert_has_calls([expected_call] * count)

    def assertNoWorkflowErrors(self, response, context_name="workflow"):
        """Checks for no workflow errors.

        Asserts that the response either does not contain a workflow in its
        context, or that if it does, that workflow has no errors.
        """
        context = getattr(response, "context", {})
        if not context or context_name not in context:
            return True
        errors = [step.action._errors for step in
                  response.context[context_name].steps]
        self.assertEqual(
            0, len(errors),
            "Unexpected errors were found on the workflow: %s" % errors)

    def assertWorkflowErrors(self, response, count=0, message=None,
                             context_name="workflow"):
        """Check for workflow errors.

        Asserts that the response does contain a workflow in its
        context, and that workflow has errors, if count were given,
        it must match the exact numbers of errors
        """
        context = getattr(response, "context", {})
        self.assertIn(context_name, context,
                      msg="The response did not contain a workflow.")
        # Collect errors from every step's action form.
        errors = {}
        for step in response.context[context_name].steps:
            errors.update(step.action._errors)
        if count:
            self.assertEqual(
                count, len(errors),
                "%d errors were found on the workflow, %d expected" %
                (len(errors), count))
            if message and message not in str(errors):
                self.fail("Expected message not found, instead found: %s"
                          % ["%s: %s" % (key, [e for e in field_errors]) for
                             (key, field_errors) in errors.items()])
        else:
            self.assertGreater(
                len(errors), 0,
                "No errors were found on the workflow")

    def assertCookieMessage(self, response, expected_msg, detail_msg=None):
        # Decodes the Django "messages" cookie from the response and asserts
        # the expected message (optionally with an appended detail, matching
        # how horizon.exceptions formats it) is present.
        data = response.cookies["messages"]
        storage = cookie_storage.CookieStorage(None)
        messages = [m.message for m in storage._decode(data.value)]
        if detail_msg is not None:
            _expected = exceptions._append_detail(expected_msg, detail_msg)
        else:
            _expected = expected_msg
        self.assertIn(_expected, messages)
|
|
|
|
|
2012-02-11 18:44:39 -08:00
|
|
|
|
|
|
|
class BaseAdminViewTests(TestCase):
    """Sets an active user with the "admin" role.

    For testing admin-only views and functionality.
    """

    def setActiveUser(self, *args, **kwargs):
        # Default the active user's roles to admin unless the caller
        # explicitly supplied roles.
        user_kwargs = dict(kwargs)
        if "roles" not in user_kwargs:
            user_kwargs["roles"] = [self.roles.admin._info]
        super().setActiveUser(*args, **user_kwargs)

    def setSessionValues(self, **kwargs):
        # Persist the given values into a file-backed session store and
        # point the test client's session cookie at it.
        settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
        engine = importlib.import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        for key, value in kwargs.items():
            store[key] = value
            self.request.session[key] = value
        store.save()
        self.session = store
        self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
|
|
|
|
|
2012-02-11 18:44:39 -08:00
|
|
|
|
2018-11-30 17:33:21 +02:00
|
|
|
class APITestCase(TestCase):
    """TestCase variant that also patches the middleware user lookup."""

    def setUp(self):
        super().setUp()
        # presumably replaces the auth middleware's get_user so tests do
        # not hit keystone -- see openstack_auth.utils for details.
        utils.patch_middleware_get_user()
|
2012-02-11 18:44:39 -08:00
|
|
|
|
2015-11-17 10:19:59 -06:00
|
|
|
|
2018-08-18 16:56:02 +09:00
|
|
|
# APIMockTestCase was introduced to support the mox-to-mock migration
# smoothly, but it turns out we still have users of APITestCase.
# We keep both for a while.
# Looking at the usage of these classes, it seems better to drop this one.
# TODO(amotoki): Clean up APIMockTestCase usage in horizon plugins.
APIMockTestCase = APITestCase
|
2017-12-30 16:40:31 +09:00
|
|
|
|
2017-10-06 16:34:01 +03:00
|
|
|
|
2015-01-26 15:12:27 +00:00
|
|
|
# Need this to test both Glance API V1 and V2 versions
|
|
|
|
class ResetImageAPIVersionMixin(object):
    """Clears the cached active Glance API version around each test.

    Prevents a Glance API version selected by one test from leaking
    into the next one.
    """

    def setUp(self):
        super().setUp()
        api.glance.VERSIONS.clear_active_cache()

    def tearDown(self):
        api.glance.VERSIONS.clear_active_cache()
        super().tearDown()
|
2015-01-26 15:12:27 +00:00
|
|
|
|
|
|
|
|
2020-04-18 08:38:36 +09:00
|
|
|
@horizon_helpers.pytest_mark('selenium')
@tag('selenium')
class SeleniumTestCase(horizon_helpers.SeleniumTestCase):
    """Selenium test case that loads fixture data and fakes authentication.

    ``utils.get_user`` is monkey-patched to return a user built from the
    loaded test data; the original implementation is restored in
    ``tearDown``.
    """

    def setUp(self):
        super().setUp()
        test_utils.load_test_data(self)
        # Remember the real implementation so tearDown can restore it.
        self._real_get_user = utils.get_user
        self.setActiveUser(id=self.user.id,
                           token=self.token,
                           username=self.user.name,
                           tenant_id=self.tenant.id,
                           service_catalog=self.service_catalog,
                           authorized_tenants=self.tenants.list())
        self.patchers = _apply_panel_mocks()
        os.environ["HORIZON_TEST_RUN"] = "True"

    def tearDown(self):
        # Undo the monkey-patching and any mock patchers started in setUp.
        utils.get_user = self._real_get_user
        mock.patch.stopall()
        del os.environ["HORIZON_TEST_RUN"]

    def setActiveUser(self, id=None, token=None, username=None, tenant_id=None,
                      service_catalog=None, tenant_name=None, roles=None,
                      authorized_tenants=None, enabled=True):
        """Replace ``utils.get_user`` with a stub returning a fixed user.

        The stub builds the ``user.User`` lazily on every call, so
        ``settings.OPENSTACK_KEYSTONE_URL`` is read at request time.
        """
        def _fake_get_user(request):
            return user.User(id=id,
                             token=token,
                             user=username,
                             tenant_id=tenant_id,
                             service_catalog=service_catalog,
                             roles=roles,
                             enabled=enabled,
                             authorized_tenants=authorized_tenants,
                             endpoint=settings.OPENSTACK_KEYSTONE_URL)

        utils.get_user = _fake_get_user
|
|
|
|
|
|
|
|
class SeleniumAdminTestCase(SeleniumTestCase):
    """Version of AdminTestCase for Selenium.

    Sets an active user with the "admin" role for testing admin-only views
    and functionality.
    """

    def setActiveUser(self, *args, **kwargs):
        # Inject the admin role only when the caller did not pass one.
        kwargs.setdefault('roles', [self.roles.admin._info])
        super().setActiveUser(*args, **kwargs)
|
2013-12-10 13:00:17 +01:00
|
|
|
|
|
|
|
|
|
|
|
def my_custom_sort(flavor):
    """Return a fixed sort rank for a flavor, keyed by its name.

    Only the four fixture flavor names below are ranked; any other
    ``flavor.name`` raises ``KeyError``.
    """
    ranks = {
        'm1.secret': 0,
        'm1.tiny': 1,
        'm1.massive': 2,
        'm1.metadata': 3,
    }
    return ranks[flavor.name]
|
2014-03-15 00:50:18 -07:00
|
|
|
|
|
|
|
|
2019-01-06 15:05:43 +09:00
|
|
|
# TODO(amotoki): Investigate a way to run PluginTestCase with the main
# unit tests. Currently we fail to find a way to clean up urlpatterns and
# Site registry touched by setUp() cleanly. As a workaround, we run
# PluginTestCase as a separate test process. Hopefully this workaround can
# be removed in future. For more detail, see bugs 1809983, 1866666 and
# https://review.opendev.org/#/c/627640/.
@horizon_helpers.pytest_mark('plugin_test')
@tag('plugin-test')
class PluginTestCase(TestCase):
    """Test case for testing plugin system of Horizon.

    For use with tests which deal with the pluggable dashboard and panel
    configuration, it takes care of backing up and restoring the Horizon
    configuration.
    """

    def setUp(self):
        super().setUp()
        # Back up HORIZON_CONFIG and replace it with a fresh LazySettings
        # so the test can register plugins without polluting other tests.
        self.old_horizon_config = conf.HORIZON_CONFIG
        conf.HORIZON_CONFIG = conf.LazySettings()
        base.Horizon._urls()
        # Store our original dashboards
        self._discovered_dashboards = base.Horizon._registry.keys()
        # Gather up and store our original panels for each dashboard
        self._discovered_panels = {}
        for dash in self._discovered_dashboards:
            panels = base.Horizon._registry[dash]._registry.keys()
            self._discovered_panels[dash] = panels

    def tearDown(self):
        super().tearDown()
        # Restore the configuration backed up in setUp().
        conf.HORIZON_CONFIG = self.old_horizon_config
        # Destroy our singleton and re-create it.
        base.HorizonSite._instance = None
        del base.Horizon
        base.Horizon = base.HorizonSite()
        # Reload the convenience references to Horizon stored in __init__
        importlib.reload(importlib.import_module("horizon"))
        # Re-register our original dashboards and panels.
        # This is necessary because autodiscovery only works on the first
        # import, and calling reload introduces innumerable additional
        # problems. Manual re-registration is the only good way for testing.
        for dash in self._discovered_dashboards:
            base.Horizon.register(dash)
            for panel in self._discovered_panels[dash]:
                dash.register(panel)
        self._reload_urls()

    def _reload_urls(self):
        """Cleans up URLs.

        Clears out the URL caches, reloads the root urls module, and
        re-triggers the autodiscovery mechanism for Horizon. Allows URLs
        to be re-calculated after registering new dashboards. Useful
        only for testing and should never be used on a live site.
        """
        urls.clear_url_caches()
        importlib.reload(importlib.import_module(settings.ROOT_URLCONF))
        base.Horizon._urls()
|
2014-08-09 06:21:19 +09:00
|
|
|
|
|
|
|
|
2015-12-11 10:39:26 +11:00
|
|
|
def mock_obj_to_dict(r):
    """Return a Mock whose ``to_dict()`` method yields *r*."""
    mocked = mock.Mock()
    mocked.to_dict.return_value = r
    return mocked
|
|
|
|
|
|
|
|
|
|
|
|
def mock_factory(r):
    """Mock every key of *r* as an attribute, as well as ``to_dict()``."""
    # Build the to_dict() stub directly, then mirror each mapping entry
    # as a plain attribute on the same mock.
    mocked = mock.Mock(**{'to_dict.return_value': r})
    mocked.configure_mock(**r)
    return mocked
|