Upgrade pylint version
So far, Tobiko has been using an old pylint version. As we would like to update the upper constraints that we use, we have to upgrade pylint to avoid some dependency conflicts. This patch implements that. Depends-On: https://review.opendev.org/c/x/devstack-plugin-tobiko/+/921834 Change-Id: I630ec0edaf930d3bdd3eaeacd55ffebc203a5763
This commit is contained in:
parent
21988b0e6a
commit
859ce180cb
@ -129,7 +129,6 @@ disable=invalid-name,
|
|||||||
c-extension-no-member,
|
c-extension-no-member,
|
||||||
literal-comparison,
|
literal-comparison,
|
||||||
comparison-with-itself,
|
comparison-with-itself,
|
||||||
no-self-use,
|
|
||||||
no-classmethod-decorator,
|
no-classmethod-decorator,
|
||||||
no-staticmethod-decorator,
|
no-staticmethod-decorator,
|
||||||
useless-object-inheritance,
|
useless-object-inheritance,
|
||||||
@ -187,7 +186,6 @@ disable=invalid-name,
|
|||||||
broad-except,
|
broad-except,
|
||||||
redundant-u-string-prefix,
|
redundant-u-string-prefix,
|
||||||
unspecified-encoding,
|
unspecified-encoding,
|
||||||
dict-values-not-iterating,
|
|
||||||
|
|
||||||
# Enable the message, report, category or checker with the given id(s). You can
|
# Enable the message, report, category or checker with the given id(s). You can
|
||||||
# either give multiple identifier separated by comma (,) or put this option
|
# either give multiple identifier separated by comma (,) or put this option
|
||||||
@ -313,7 +311,6 @@ enable=syntax-error,
|
|||||||
unnecessary-pass,
|
unnecessary-pass,
|
||||||
unnecessary-lambda,
|
unnecessary-lambda,
|
||||||
duplicate-key,
|
duplicate-key,
|
||||||
assign-to-new-keyword,
|
|
||||||
useless-else-on-loop,
|
useless-else-on-loop,
|
||||||
exec-used,
|
exec-used,
|
||||||
eval-used,
|
eval-used,
|
||||||
@ -334,7 +331,6 @@ enable=syntax-error,
|
|||||||
signature-differs,
|
signature-differs,
|
||||||
abstract-method,
|
abstract-method,
|
||||||
super-init-not-called,
|
super-init-not-called,
|
||||||
no-init,
|
|
||||||
non-parent-init-called,
|
non-parent-init-called,
|
||||||
useless-super-delegation,
|
useless-super-delegation,
|
||||||
invalid-overridden-method,
|
invalid-overridden-method,
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# pep8 and flake8 requirements
|
# pep8 and flake8 requirements
|
||||||
|
|
||||||
pre-commit >= 2.16.0 # MIT
|
pre-commit >= 2.16.0 # MIT
|
||||||
pylint===2.12.2 # GPL2
|
pylint>=2.5.3 # GPL2
|
||||||
|
@ -32,6 +32,7 @@ from tobiko.actors import _request
|
|||||||
A = typing.TypeVar('A', bound='ActorBase')
|
A = typing.TypeVar('A', bound='ActorBase')
|
||||||
|
|
||||||
|
|
||||||
|
# pylint: disable=inherit-non-class
|
||||||
class ActorRef(_proxy.CallProxyBase, _proxy.Generic[A]):
|
class ActorRef(_proxy.CallProxyBase, _proxy.Generic[A]):
|
||||||
|
|
||||||
_actor_class: typing.Type[A]
|
_actor_class: typing.Type[A]
|
||||||
|
@ -76,6 +76,7 @@ class CallHandler(abc.ABC):
|
|||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
|
|
||||||
|
# pylint: disable=inherit-non-class
|
||||||
class CallProxyBase(CallHandler, Generic[P], abc.ABC):
|
class CallProxyBase(CallHandler, Generic[P], abc.ABC):
|
||||||
|
|
||||||
def __class_getitem__(cls, item: typing.Type[P]):
|
def __class_getitem__(cls, item: typing.Type[P]):
|
||||||
@ -92,6 +93,7 @@ class CallProxyBase(CallHandler, Generic[P], abc.ABC):
|
|||||||
return is_public_abstract_method(obj)
|
return is_public_abstract_method(obj)
|
||||||
|
|
||||||
|
|
||||||
|
# pylint: disable=inherit-non-class
|
||||||
class CallProxy(CallProxyBase, Generic[P]):
|
class CallProxy(CallProxyBase, Generic[P]):
|
||||||
|
|
||||||
def __init__(self, handle_call: typing.Callable):
|
def __init__(self, handle_call: typing.Callable):
|
||||||
|
@ -251,6 +251,7 @@ def read_pid_file(pid_file: str) -> typing.Tuple[int, ...]:
|
|||||||
with fd:
|
with fd:
|
||||||
for line_number, line in enumerate(fd.readlines(), start=1):
|
for line_number, line in enumerate(fd.readlines(), start=1):
|
||||||
line = line.strip()
|
line = line.strip()
|
||||||
|
pid = None
|
||||||
if line:
|
if line:
|
||||||
try:
|
try:
|
||||||
pid = int(line.rstrip())
|
pid = int(line.rstrip())
|
||||||
@ -258,6 +259,8 @@ def read_pid_file(pid_file: str) -> typing.Tuple[int, ...]:
|
|||||||
LOG.exception(f"{pid_file}:{line_number}: value is "
|
LOG.exception(f"{pid_file}:{line_number}: value is "
|
||||||
f"not an integer ({line}).")
|
f"not an integer ({line}).")
|
||||||
continue
|
continue
|
||||||
|
if not pid:
|
||||||
|
continue
|
||||||
pids.append(pid)
|
pids.append(pid)
|
||||||
return tuple(pids)
|
return tuple(pids)
|
||||||
|
|
||||||
|
@ -500,8 +500,7 @@ class FixtureManager:
|
|||||||
self.fixtures[name] = fixture
|
self.fixtures[name] = fixture
|
||||||
return fixture
|
return fixture
|
||||||
|
|
||||||
@staticmethod
|
def init_fixture(self, obj: typing.Union[typing.Type[F], F],
|
||||||
def init_fixture(obj: typing.Union[typing.Type[F], F],
|
|
||||||
name: str,
|
name: str,
|
||||||
fixture_id: typing.Any,
|
fixture_id: typing.Any,
|
||||||
**kwargs) -> F:
|
**kwargs) -> F:
|
||||||
|
@ -233,6 +233,7 @@ class UnixHTTPConnection(unixconn.UnixHTTPConnection):
|
|||||||
sudo=False):
|
sudo=False):
|
||||||
self.ssh_client = ssh_client
|
self.ssh_client = ssh_client
|
||||||
self.sudo = sudo
|
self.sudo = sudo
|
||||||
|
self.sock = None
|
||||||
super(UnixHTTPConnection, self).__init__(base_url=base_url,
|
super(UnixHTTPConnection, self).__init__(base_url=base_url,
|
||||||
unix_socket=unix_socket,
|
unix_socket=unix_socket,
|
||||||
timeout=timeout)
|
timeout=timeout)
|
||||||
|
@ -361,6 +361,7 @@ class URLGlanceImageFixture(FileGlanceImageFixture):
|
|||||||
tobiko.check_valid_type(image_url, str)
|
tobiko.check_valid_type(image_url, str)
|
||||||
|
|
||||||
def get_image_file(self, image_file: str):
|
def get_image_file(self, image_file: str):
|
||||||
|
# pylint: disable=missing-timeout
|
||||||
http_request = requests.get(self.image_url, stream=True)
|
http_request = requests.get(self.image_url, stream=True)
|
||||||
expected_size = int(http_request.headers.get('content-length', 0))
|
expected_size = int(http_request.headers.get('content-length', 0))
|
||||||
chunks = http_request.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE)
|
chunks = http_request.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE)
|
||||||
|
@ -248,7 +248,7 @@ def parse_ips_from_db_connections(con_str):
|
|||||||
try:
|
try:
|
||||||
addr = netaddr.IPAddress(tmp_addr.strip(']['))
|
addr = netaddr.IPAddress(tmp_addr.strip(']['))
|
||||||
except ValueError as ex:
|
except ValueError as ex:
|
||||||
msg = 'Invalid IP address "{}" in "{}"'.format(addr, con_str)
|
msg = 'Invalid IP address "{}" in "{}"'.format(tmp_addr, con_str)
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise InvalidDBConnString(message=msg) from ex
|
raise InvalidDBConnString(message=msg) from ex
|
||||||
addrs.append(addr)
|
addrs.append(addr)
|
||||||
|
@ -298,6 +298,7 @@ def assert_equal_containers_state(expected_containers_list=None,
|
|||||||
second time it creates a current containers states list and
|
second time it creates a current containers states list and
|
||||||
compares them, they must be identical"""
|
compares them, they must be identical"""
|
||||||
|
|
||||||
|
expected_containers_list_df = []
|
||||||
# if we have a file or an explicit variable use that , otherwise create
|
# if we have a file or an explicit variable use that , otherwise create
|
||||||
# and return
|
# and return
|
||||||
if recreate_expected or (
|
if recreate_expected or (
|
||||||
|
@ -93,6 +93,7 @@ def find_ip_address(ip_version: int = None,
|
|||||||
|
|
||||||
|
|
||||||
def parse_ip_address(text: str) -> typing.Tuple[netaddr.IPAddress, int]:
|
def parse_ip_address(text: str) -> typing.Tuple[netaddr.IPAddress, int]:
|
||||||
|
address = text
|
||||||
if '/' in text:
|
if '/' in text:
|
||||||
# Remove netmask prefix length
|
# Remove netmask prefix length
|
||||||
address, prefix_len_text = text.split('/', 1)
|
address, prefix_len_text = text.split('/', 1)
|
||||||
|
@ -77,8 +77,7 @@ class SockHeader():
|
|||||||
return len(self.header)
|
return len(self.header)
|
||||||
|
|
||||||
def __iter__(self):
|
def __iter__(self):
|
||||||
for elem in self.header:
|
yield from self.header
|
||||||
yield elem
|
|
||||||
|
|
||||||
|
|
||||||
class SockLine(str):
|
class SockLine(str):
|
||||||
@ -118,6 +117,7 @@ def _ss(params: str = '',
|
|||||||
**execute_params) -> typing.List[SockData]:
|
**execute_params) -> typing.List[SockData]:
|
||||||
execute_params.update({'sudo': True})
|
execute_params.update({'sudo': True})
|
||||||
sockets = []
|
sockets = []
|
||||||
|
headers = None
|
||||||
if table_header:
|
if table_header:
|
||||||
# Predefined header might be necessary if the command is expected to
|
# Predefined header might be necessary if the command is expected to
|
||||||
# be executed in any kind of environments. Old versrions of `ss`
|
# be executed in any kind of environments. Old versrions of `ss`
|
||||||
@ -143,7 +143,7 @@ def _ss(params: str = '',
|
|||||||
parsed_header = True
|
parsed_header = True
|
||||||
continue
|
continue
|
||||||
sock_info = SockLine(line.strip())
|
sock_info = SockLine(line.strip())
|
||||||
if parser:
|
if parser and headers:
|
||||||
try:
|
try:
|
||||||
sockets.append(parser(headers, sock_info))
|
sockets.append(parser(headers, sock_info))
|
||||||
except ValueError as ex:
|
except ValueError as ex:
|
||||||
|
@ -297,7 +297,7 @@ def binding_address(url):
|
|||||||
def binding_url(address):
|
def binding_url(address):
|
||||||
if isinstance(address, tuple):
|
if isinstance(address, tuple):
|
||||||
try:
|
try:
|
||||||
hostname, = address
|
hostname, port = address
|
||||||
except ValueError:
|
except ValueError:
|
||||||
hostname, port = address
|
hostname, port = address
|
||||||
return 'tcp://{hostname}:{port}'.format(hostname=hostname,
|
return 'tcp://{hostname}:{port}'.format(hostname=hostname,
|
||||||
|
@ -372,7 +372,6 @@ def rotate_logs(node):
|
|||||||
containers = get_filtered_node_containers(node, ['logrotate.*', ])
|
containers = get_filtered_node_containers(node, ['logrotate.*', ])
|
||||||
if not containers:
|
if not containers:
|
||||||
tobiko.skip_test('No logrotate container has been found')
|
tobiko.skip_test('No logrotate container has been found')
|
||||||
else:
|
|
||||||
container = containers[0]
|
container = containers[0]
|
||||||
sh.execute(f'docker exec -u root {container} logrotate '
|
sh.execute(f'docker exec -u root {container} logrotate '
|
||||||
'-f /etc/logrotate-crond.conf',
|
'-f /etc/logrotate-crond.conf',
|
||||||
|
@ -26,7 +26,7 @@ import tobiko
|
|||||||
from tobiko.openstack import keystone
|
from tobiko.openstack import keystone
|
||||||
from tobiko.openstack import neutron
|
from tobiko.openstack import neutron
|
||||||
from tobiko.openstack import nova
|
from tobiko.openstack import nova
|
||||||
from tobiko.openstack import topology
|
from tobiko.openstack import topology as osp_topology
|
||||||
from tobiko.shell import ping
|
from tobiko.shell import ping
|
||||||
from tobiko.shell import sh
|
from tobiko.shell import sh
|
||||||
|
|
||||||
@ -37,17 +37,18 @@ PatternType = type(re.compile(r''))
|
|||||||
@keystone.skip_unless_has_keystone_credentials()
|
@keystone.skip_unless_has_keystone_credentials()
|
||||||
class OpenStackTopologyTest(testtools.TestCase):
|
class OpenStackTopologyTest(testtools.TestCase):
|
||||||
|
|
||||||
expected_group: topology.OpenstackGroupNamesType = None
|
expected_group: osp_topology.OpenstackGroupNamesType = None
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def topology(self) -> topology.OpenStackTopology:
|
def topology(self) -> osp_topology.OpenStackTopology:
|
||||||
return topology.get_openstack_topology()
|
return osp_topology.get_openstack_topology()
|
||||||
|
|
||||||
def test_get_openstack_topology(self):
|
def test_get_openstack_topology(self):
|
||||||
topology_class = type(self.topology)
|
topology_class = type(self.topology)
|
||||||
topo = topology.get_openstack_topology(topology_class=topology_class)
|
topo = osp_topology.get_openstack_topology(
|
||||||
|
topology_class=topology_class)
|
||||||
self.assertIs(topo, self.topology)
|
self.assertIs(topo, self.topology)
|
||||||
self.assertIsInstance(topo, topology.OpenStackTopology)
|
self.assertIsInstance(topo, osp_topology.OpenStackTopology)
|
||||||
|
|
||||||
def test_ping_node(self):
|
def test_ping_node(self):
|
||||||
for node in self.topology.nodes:
|
for node in self.topology.nodes:
|
||||||
@ -80,7 +81,7 @@ class OpenStackTopologyTest(testtools.TestCase):
|
|||||||
self.assertIn(node, nodes)
|
self.assertIn(node, nodes)
|
||||||
|
|
||||||
def test_list_openstack_topology(self, group=None, hostnames=None):
|
def test_list_openstack_topology(self, group=None, hostnames=None):
|
||||||
nodes = topology.list_openstack_nodes(
|
nodes = osp_topology.list_openstack_nodes(
|
||||||
topology=self.topology, group=group, hostnames=hostnames)
|
topology=self.topology, group=group, hostnames=hostnames)
|
||||||
self.assertTrue(set(nodes).issubset(set(self.topology.nodes)))
|
self.assertTrue(set(nodes).issubset(set(self.topology.nodes)))
|
||||||
self.assertEqual(len(set(nodes)), len(nodes),
|
self.assertEqual(len(set(nodes)), len(nodes),
|
||||||
@ -136,7 +137,7 @@ class OpenStackTopologyTest(testtools.TestCase):
|
|||||||
self.assertEqual(expected_nodes, actual_nodes)
|
self.assertEqual(expected_nodes, actual_nodes)
|
||||||
|
|
||||||
def test_list_nodes_processes(self):
|
def test_list_nodes_processes(self):
|
||||||
node = random.choice(topology.list_openstack_nodes(
|
node = random.choice(osp_topology.list_openstack_nodes(
|
||||||
group=self.expected_group))
|
group=self.expected_group))
|
||||||
filename = sh.execute(
|
filename = sh.execute(
|
||||||
'mktemp', ssh_client=node.ssh_client).stdout.strip()
|
'mktemp', ssh_client=node.ssh_client).stdout.strip()
|
||||||
@ -147,7 +148,7 @@ class OpenStackTopologyTest(testtools.TestCase):
|
|||||||
ssh_client=node.ssh_client)
|
ssh_client=node.ssh_client)
|
||||||
|
|
||||||
# Process isn't listed before creation
|
# Process isn't listed before creation
|
||||||
processes = topology.list_nodes_processes(
|
processes = osp_topology.list_nodes_processes(
|
||||||
command_line=command_line,
|
command_line=command_line,
|
||||||
hostnames=[node.hostname])
|
hostnames=[node.hostname])
|
||||||
self.assertEqual([], processes,
|
self.assertEqual([], processes,
|
||||||
@ -156,7 +157,7 @@ class OpenStackTopologyTest(testtools.TestCase):
|
|||||||
# Process is listed after creation
|
# Process is listed after creation
|
||||||
process.execute()
|
process.execute()
|
||||||
self.addCleanup(process.kill)
|
self.addCleanup(process.kill)
|
||||||
processes = topology.list_nodes_processes(
|
processes = osp_topology.list_nodes_processes(
|
||||||
command_line=command_line,
|
command_line=command_line,
|
||||||
hostnames=[node.hostname])
|
hostnames=[node.hostname])
|
||||||
self.assertEqual(command_line, processes.unique.command_line)
|
self.assertEqual(command_line, processes.unique.command_line)
|
||||||
@ -165,7 +166,7 @@ class OpenStackTopologyTest(testtools.TestCase):
|
|||||||
# Process isn't listed after kill
|
# Process isn't listed after kill
|
||||||
processes.unique.kill()
|
processes.unique.kill()
|
||||||
for attempt in tobiko.retry(timeout=30., interval=5.):
|
for attempt in tobiko.retry(timeout=30., interval=5.):
|
||||||
processes = topology.list_nodes_processes(
|
processes = osp_topology.list_nodes_processes(
|
||||||
command_line=command_line,
|
command_line=command_line,
|
||||||
hostnames=[node.hostname])
|
hostnames=[node.hostname])
|
||||||
if not processes:
|
if not processes:
|
||||||
@ -175,9 +176,9 @@ class OpenStackTopologyTest(testtools.TestCase):
|
|||||||
|
|
||||||
@neutron.skip_unless_is_ovs()
|
@neutron.skip_unless_is_ovs()
|
||||||
def test_l3_agent_mode(self):
|
def test_l3_agent_mode(self):
|
||||||
for node in topology.list_openstack_nodes(
|
for node in osp_topology.list_openstack_nodes(
|
||||||
group=['controller', 'compute', 'overcloud']):
|
group=['controller', 'compute', 'overcloud']):
|
||||||
assert isinstance(node, topology.OpenStackTopologyNode)
|
assert isinstance(node, osp_topology.OpenStackTopologyNode)
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
neutron.get_l3_agent_mode(
|
neutron.get_l3_agent_mode(
|
||||||
l3_agent_conf_path=node.l3_agent_conf_path,
|
l3_agent_conf_path=node.l3_agent_conf_path,
|
||||||
@ -195,7 +196,7 @@ class HostsNamespaceTest(testtools.TestCase):
|
|||||||
|
|
||||||
@property
|
@property
|
||||||
def all_hostnames(self) -> typing.List[str]:
|
def all_hostnames(self) -> typing.List[str]:
|
||||||
nodes = topology.list_openstack_nodes()
|
nodes = osp_topology.list_openstack_nodes()
|
||||||
return sorted(node.hostname for node in nodes)
|
return sorted(node.hostname for node in nodes)
|
||||||
|
|
||||||
def selected_hostames(self, hostnames: typing.Iterable[str] = None) -> \
|
def selected_hostames(self, hostnames: typing.Iterable[str] = None) -> \
|
||||||
@ -207,7 +208,7 @@ class HostsNamespaceTest(testtools.TestCase):
|
|||||||
|
|
||||||
def test_get_hosts_namespaces(self,
|
def test_get_hosts_namespaces(self,
|
||||||
hostnames: typing.Iterable[str] = None):
|
hostnames: typing.Iterable[str] = None):
|
||||||
namespaces = topology.get_hosts_namespaces(hostnames=hostnames)
|
namespaces = osp_topology.get_hosts_namespaces(hostnames=hostnames)
|
||||||
self.assertIsInstance(namespaces, dict)
|
self.assertIsInstance(namespaces, dict)
|
||||||
for namespace, _hostnames in namespaces.items():
|
for namespace, _hostnames in namespaces.items():
|
||||||
self.assertIsInstance(namespace, str)
|
self.assertIsInstance(namespace, str)
|
||||||
@ -221,9 +222,9 @@ class HostsNamespaceTest(testtools.TestCase):
|
|||||||
|
|
||||||
def test_assert_namespace_in_hosts(self,
|
def test_assert_namespace_in_hosts(self,
|
||||||
hostnames: typing.Iterable[str] = None):
|
hostnames: typing.Iterable[str] = None):
|
||||||
namespaces = topology.get_hosts_namespaces(hostnames=hostnames)
|
namespaces = osp_topology.get_hosts_namespaces(hostnames=hostnames)
|
||||||
for namespace, hostnames in namespaces.items():
|
for namespace, hostnames in namespaces.items():
|
||||||
topology.assert_namespace_in_hosts(namespace,
|
osp_topology.assert_namespace_in_hosts(namespace,
|
||||||
hostnames=hostnames)
|
hostnames=hostnames)
|
||||||
|
|
||||||
def test_assert_namespace_in_hosts_with_hostnames(self):
|
def test_assert_namespace_in_hosts_with_hostnames(self):
|
||||||
@ -231,12 +232,12 @@ class HostsNamespaceTest(testtools.TestCase):
|
|||||||
|
|
||||||
def test_assert_namespaces_in_host_failure(self):
|
def test_assert_namespaces_in_host_failure(self):
|
||||||
self.assertRaises(self.failureException,
|
self.assertRaises(self.failureException,
|
||||||
topology.assert_namespace_in_hosts,
|
osp_topology.assert_namespace_in_hosts,
|
||||||
'<invalid>')
|
'<invalid>')
|
||||||
|
|
||||||
def test_assert_namespace_not_in_hosts(
|
def test_assert_namespace_not_in_hosts(
|
||||||
self, hostnames: typing.Iterable[str] = None):
|
self, hostnames: typing.Iterable[str] = None):
|
||||||
topology.assert_namespace_not_in_hosts('<invalid>',
|
osp_topology.assert_namespace_not_in_hosts('<invalid>',
|
||||||
hostnames=hostnames)
|
hostnames=hostnames)
|
||||||
|
|
||||||
def test_assert_namespace_not_in_hosts_with_hostnames(self):
|
def test_assert_namespace_not_in_hosts_with_hostnames(self):
|
||||||
@ -244,8 +245,8 @@ class HostsNamespaceTest(testtools.TestCase):
|
|||||||
hostnames=self.all_hostnames[:1])
|
hostnames=self.all_hostnames[:1])
|
||||||
|
|
||||||
def test_assert_namespace_not_in_hosts_failure(self):
|
def test_assert_namespace_not_in_hosts_failure(self):
|
||||||
namespaces = topology.get_hosts_namespaces()
|
namespaces = osp_topology.get_hosts_namespaces()
|
||||||
for namespace in namespaces:
|
for namespace in namespaces:
|
||||||
self.assertRaises(self.failureException,
|
self.assertRaises(self.failureException,
|
||||||
topology.assert_namespace_not_in_hosts,
|
osp_topology.assert_namespace_not_in_hosts,
|
||||||
namespace)
|
namespace)
|
||||||
|
@ -16,7 +16,7 @@
|
|||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
|
|
||||||
import tobiko
|
import tobiko
|
||||||
from tobiko.openstack import topology
|
from tobiko.openstack import topology as osp_topology
|
||||||
from tobiko.shell import ping
|
from tobiko.shell import ping
|
||||||
from tobiko.shell import sh
|
from tobiko.shell import sh
|
||||||
from tobiko.tests.functional.openstack import test_topology
|
from tobiko.tests.functional.openstack import test_topology
|
||||||
@ -26,7 +26,7 @@ from tobiko import podified
|
|||||||
@podified.skip_if_not_podified
|
@podified.skip_if_not_podified
|
||||||
class PodifiedTopologyTest(test_topology.OpenStackTopologyTest):
|
class PodifiedTopologyTest(test_topology.OpenStackTopologyTest):
|
||||||
|
|
||||||
expected_group: topology.OpenstackGroupNamesType = 'compute'
|
expected_group: osp_topology.OpenstackGroupNamesType = 'compute'
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def topology(self) -> podified.PodifiedTopology:
|
def topology(self) -> podified.PodifiedTopology:
|
||||||
|
@ -218,6 +218,8 @@ class TripleoContainersHealthTest(BaseContainersHealtTest):
|
|||||||
containers' states: ~/expected_containers_list_df.csv'
|
containers' states: ~/expected_containers_list_df.csv'
|
||||||
second time it creates a current containers states list and
|
second time it creates a current containers states list and
|
||||||
compares them, they must be identical"""
|
compares them, they must be identical"""
|
||||||
|
|
||||||
|
expected_containers_list_df = []
|
||||||
# if we have a file or an explicit variable use that ,
|
# if we have a file or an explicit variable use that ,
|
||||||
# otherwise create and return
|
# otherwise create and return
|
||||||
if recreate_expected or (not (expected_containers_list or
|
if recreate_expected or (not (expected_containers_list or
|
||||||
@ -358,6 +360,8 @@ class PodifiedContainersHealthTest(BaseContainersHealtTest):
|
|||||||
containers' states: ~/expected_containers_list_df.csv'
|
containers' states: ~/expected_containers_list_df.csv'
|
||||||
second time it creates a current containers states list and
|
second time it creates a current containers states list and
|
||||||
compares them, they must be identical"""
|
compares them, they must be identical"""
|
||||||
|
|
||||||
|
expected_containers_list_df = []
|
||||||
# if we have a file or an explicit variable use that ,
|
# if we have a file or an explicit variable use that ,
|
||||||
# otherwise create and return
|
# otherwise create and return
|
||||||
if recreate_expected or (not (expected_containers_list or
|
if recreate_expected or (not (expected_containers_list or
|
||||||
|
@ -111,6 +111,8 @@ class TestCheckValidType(unit.TobikoUnitTest):
|
|||||||
class TestExcInfo(unit.TobikoUnitTest):
|
class TestExcInfo(unit.TobikoUnitTest):
|
||||||
|
|
||||||
def test_exc_info(self):
|
def test_exc_info(self):
|
||||||
|
exc_type, exc_value, traceback = None, None, None
|
||||||
|
exc_info = None
|
||||||
try:
|
try:
|
||||||
raise RuntimeError('some error')
|
raise RuntimeError('some error')
|
||||||
except RuntimeError:
|
except RuntimeError:
|
||||||
|
@ -521,6 +521,7 @@ def assert_equal_containers_state(expected_containers_list=None,
|
|||||||
second time it creates a current containers states list and
|
second time it creates a current containers states list and
|
||||||
compares them, they must be identical"""
|
compares them, they must be identical"""
|
||||||
|
|
||||||
|
expected_containers_list_df = []
|
||||||
# if we have a file or an explicit variable use that , otherwise create
|
# if we have a file or an explicit variable use that , otherwise create
|
||||||
# and return
|
# and return
|
||||||
if recreate_expected or (
|
if recreate_expected or (
|
||||||
|
Loading…
Reference in New Issue
Block a user