Add simple Octavia traffic scenario
Add a scenario for octavia: create a simple load balancer with HTTP listener and pool, add 2 members and ensure that traffic is fairly dispatched. Change-Id: Id8596ae1940ed97c52c0ef0e8f3e0ba7888d728a
This commit is contained in:
parent
ddfb968370
commit
d4b125218a
@ -250,3 +250,15 @@
|
||||
|
||||
# Default ubuntu password (string value)
|
||||
#password = <None>
|
||||
|
||||
[octavia]
|
||||
|
||||
#
|
||||
# From tobiko
|
||||
#
|
||||
|
||||
# Interval to check for status changes, in seconds (integer value)
|
||||
#check_interval = 5
|
||||
|
||||
# Timeout, in seconds, to wait for a status change (integer value)
|
||||
#check_timeout = 360
|
||||
|
@ -34,6 +34,7 @@ CONFIG_MODULES = ['tobiko.openstack.glance.config',
|
||||
'tobiko.openstack.keystone.config',
|
||||
'tobiko.openstack.neutron.config',
|
||||
'tobiko.openstack.nova.config',
|
||||
'tobiko.openstack.octavia.config',
|
||||
'tobiko.openstack.os_faults.config',
|
||||
'tobiko.openstack.topology.config',
|
||||
'tobiko.shell.ssh.config',
|
||||
|
@ -21,3 +21,5 @@ get_loadbalancer = _client.get_loadbalancer
|
||||
get_octavia_client = _client.get_octavia_client
|
||||
octavia_client = _client.octavia_client
|
||||
OctaviaClientFixture = _client.OctaviaClientFixture
|
||||
|
||||
get_loadbalancer = _client.get_loadbalancer
|
||||
|
36
tobiko/openstack/octavia/config.py
Normal file
36
tobiko/openstack/octavia/config.py
Normal file
@ -0,0 +1,36 @@
|
||||
# Copyright 2019 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import

import itertools

from oslo_config import cfg

# Name of the [octavia] section in tobiko.conf
GROUP_NAME = 'octavia'

# Options polled by the Octavia scenario tests while waiting for load
# balancer resources to reach the expected state
OPTIONS = [
    cfg.IntOpt('check_interval',
               default=5,
               help='Interval to check for status changes, in seconds.'),
    cfg.IntOpt('check_timeout',
               default=360,
               help='Timeout, in seconds, to wait for a status change.'),
]


def register_tobiko_options(conf):
    """Register the [octavia] option group on the given config object."""
    conf.register_opts(group=cfg.OptGroup(GROUP_NAME), opts=OPTIONS)


def list_options():
    """Return (group name, options) pairs for config file generation."""
    return [(GROUP_NAME, itertools.chain(OPTIONS))]
|
@ -21,6 +21,7 @@ from tobiko.openstack.stacks import _fedora
|
||||
from tobiko.openstack.stacks import _l3ha
|
||||
from tobiko.openstack.stacks import _neutron
|
||||
from tobiko.openstack.stacks import _nova
|
||||
from tobiko.openstack.stacks import _octavia
|
||||
from tobiko.openstack.stacks import _ubuntu
|
||||
|
||||
CentosFlavorStackFixture = _centos.CentosFlavorStackFixture
|
||||
@ -59,3 +60,9 @@ FlavorStackFixture = _nova.FlavorStackFixture
|
||||
UbuntuFlavorStackFixture = _ubuntu.UbuntuFlavorStackFixture
|
||||
UbuntuImageFixture = _ubuntu.UbuntuImageFixture
|
||||
UbuntuServerStackFixture = _ubuntu.UbuntuServerStackFixture
|
||||
|
||||
OctaviaLoadbalancerStackFixture = _octavia.OctaviaLoadbalancerStackFixture
|
||||
OctaviaListenerStackFixture = _octavia.OctaviaListenerStackFixture
|
||||
OctaviaMemberServerStackFixture = _octavia.OctaviaMemberServerStackFixture
|
||||
OctaviaServerStackFixture = _octavia.OctaviaServerStackFixture
|
||||
OctaviaClientServerStackFixture = _octavia.OctaviaClientServerStackFixture
|
||||
|
152
tobiko/openstack/stacks/_octavia.py
Normal file
152
tobiko/openstack/stacks/_octavia.py
Normal file
@ -0,0 +1,152 @@
|
||||
# Copyright (c) 2019 Red Hat, Inc.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from __future__ import absolute_import
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
import tobiko
|
||||
from tobiko import config
|
||||
from tobiko.openstack import heat
|
||||
from tobiko.openstack.stacks import _centos
|
||||
from tobiko.openstack.stacks import _cirros
|
||||
from tobiko.openstack.stacks import _hot
|
||||
from tobiko.openstack.stacks import _neutron
|
||||
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class OctaviaVipNetworkStackFixture(_neutron.NetworkStackFixture):
    """Network hosting the load balancer VIP port."""
    # Load Balancer VIP network must use port security (required by neutron to
    # support allowed address pairs on ports)
    port_security_enabled = True
|
||||
|
||||
|
||||
class OctaviaMemberNetworkStackFixture(_neutron.NetworkStackFixture):
    """Network hosting the load balancer member servers."""
    pass
|
||||
|
||||
|
||||
class OctaviaCentosServerStackFixture(_centos.CentosServerStackFixture):
    """CentOS-based member server running a real web server (httpd).

    Unlike the CirrOS fixture below, httpd can serve concurrent
    requests, so this fixture is suited for concurrency testing.
    """
    # Member servers are plugged into the dedicated member network
    network_stack = tobiko.required_setup_fixture(
        OctaviaMemberNetworkStackFixture)

    @property
    def user_data(self):
        # Launch a webserver on port 80 that replies the server name to the
        # client
        return ("#cloud-config\n"
                "packages:\n"
                "- httpd\n"
                "runcmd:\n"
                "- [ sh, -c, \"hostname > /var/www/html/id\" ]\n"
                "- [ systemctl, enable, --now, httpd ]\n")
|
||||
|
||||
|
||||
class OctaviaCirrosServerStackFixture(_cirros.CirrosServerStackFixture):
    """CirrOS-based member server exposing a minimal fake web server."""
    # Member servers are plugged into the dedicated member network
    network_stack = tobiko.required_setup_fixture(
        OctaviaMemberNetworkStackFixture)

    @property
    def user_data(self):
        # Launch a webserver on port 80 that replies the server name to the
        # client
        # This webserver relies on the nc command which may fail if multiple
        # clients connect at the same time. For concurrency testing,
        # OctaviaCentosServerStackFixture is more suited to handle multiple
        # requests.
        return ("#!/bin/sh\n"
                "sudo nc -k -p 80 -e echo -e \"HTTP/1.0 200 OK\r\n"
                "\r\n$(hostname)\"\n")
|
||||
|
||||
|
||||
class OctaviaServerStackFixture(OctaviaCirrosServerStackFixture):
    """Default member server implementation (currently CirrOS-based)."""
    pass
|
||||
|
||||
|
||||
class OctaviaLoadbalancerStackFixture(heat.HeatStackFixture):
    """Heat stack creating an Octavia load balancer on the VIP subnet."""

    template = _hot.heat_template_file('octavia/load_balancer.yaml')

    vip_network = tobiko.required_setup_fixture(OctaviaVipNetworkStackFixture)

    # IP family of the VIP subnet (4 or 6)
    ip_version = 4

    @property
    def vip_subnet_id(self):
        # Pick the VIP network subnet matching the configured IP version;
        # only the selected attribute is evaluated (lazy stack output fetch)
        return (self.vip_network.ipv4_subnet_id
                if self.ip_version == 4
                else self.vip_network.ipv6_subnet_id)
|
||||
|
||||
|
||||
class OctaviaListenerStackFixture(heat.HeatStackFixture):
    """Heat stack creating an HTTP listener and a pool on the load balancer."""
    template = _hot.heat_template_file('octavia/listener.yaml')

    loadbalancer = tobiko.required_setup_fixture(
        OctaviaLoadbalancerStackFixture)

    # TCP port exposed by the listener on the VIP
    lb_port = 80

    # Protocol used between the load balancer and the pool members
    pool_protocol = 'HTTP'

    # Public protocol exposed by the listener
    lb_protocol = 'HTTP'

    # Load balancing algorithm used by the pool
    lb_algorithm = 'ROUND_ROBIN'

    # Health-monitor type (template parameter; see listener.yaml)
    hm_type = 'HTTP'

    @property
    def loadbalancer_id(self):
        # ID of the load balancer created by the parent stack
        return self.loadbalancer.loadbalancer_id
|
||||
|
||||
|
||||
class OctaviaMemberServerStackFixture(heat.HeatStackFixture):
    """Heat stack registering one server as a member of the listener pool."""

    template = _hot.heat_template_file('octavia/member.yaml')

    listener = tobiko.required_setup_fixture(OctaviaListenerStackFixture)

    server_stack = tobiko.required_setup_fixture(OctaviaServerStackFixture)

    # Port on which the member server application listens
    application_port = 80

    # IP family of the member address (4 or 6)
    ip_version = 4

    @property
    def pool_id(self):
        # The member joins the pool created by the listener stack
        return self.listener.pool_id

    @property
    def subnet_id(self):
        # Subnet of the member network matching the configured IP version
        network = self.server_stack.network_stack
        if self.ip_version == 4:
            return network.ipv4_subnet_id
        return network.ipv6_subnet_id

    @property
    def member_address(self):
        # First fixed IP of the server whose family matches ip_version;
        # raises IndexError when the server has no such address
        addresses = [fixed_ip['ip_address']
                     for fixed_ip in self.server_stack.fixed_ips]
        if self.ip_version == 4:
            matching = [address for address in addresses
                        if ':' not in address]
        elif self.ip_version == 6:
            matching = [address for address in addresses
                        if ':' in address]
        else:
            matching = []
        return matching[0]
|
||||
|
||||
|
||||
class OctaviaClientServerStackFixture(_cirros.CirrosServerStackFixture):
    """Client VM attached to the VIP network, used to send traffic."""
    network_stack = tobiko.required_setup_fixture(
        OctaviaVipNetworkStackFixture)
|
57
tobiko/openstack/stacks/octavia/listener.yaml
Normal file
57
tobiko/openstack/stacks/octavia/listener.yaml
Normal file
@ -0,0 +1,57 @@
|
||||
heat_template_version: 2015-10-15

description: A Listener and a pool for a Load Balancer

parameters:
  lb_port:
    type: number
    default: 80
    description: Port used by the listener

  lb_protocol:
    type: string
    default: HTTP
    description: Public protocol exposed by the listener

  lb_algorithm:
    type: string
    default: ROUND_ROBIN
    description: Load balancing algorithm

  pool_protocol:
    type: string
    default: HTTP
    description: Protocol used by the pool members

  # NOTE(review): hm_type is declared but no OS::Octavia::HealthMonitor
  # resource consumes it yet — confirm whether a health monitor is planned
  hm_type:
    type: string
    default: HTTP
    description: Type of health-monitor

  loadbalancer_id:
    type: string
    description: ID of the load balancer

resources:
  # Pool of member servers attached to the listener below
  pool:
    type: OS::Octavia::Pool
    properties:
      lb_algorithm: { get_param: lb_algorithm }
      protocol: { get_param: pool_protocol }
      listener: { get_resource: listener }

  # Listener exposing lb_port on the load balancer VIP
  listener:
    type: OS::Octavia::Listener
    properties:
      loadbalancer: { get_param: loadbalancer_id }
      protocol: { get_param: lb_protocol }
      protocol_port: { get_param: lb_port }

outputs:
  listener_id:
    description: Listener ID
    value: { get_resource: listener }

  pool_id:
    description: Pool ID
    value: { get_resource: pool }
|
25
tobiko/openstack/stacks/octavia/load_balancer.yaml
Normal file
25
tobiko/openstack/stacks/octavia/load_balancer.yaml
Normal file
@ -0,0 +1,25 @@
|
||||
heat_template_version: 2015-10-15

description: A Load Balancer

parameters:
  vip_subnet_id:
    type: string
    description: ID of the load balancer public subnet
    constraints:
      - custom_constraint: neutron.subnet

resources:
  # Octavia load balancer with its VIP port on the given subnet
  loadbalancer:
    type: OS::Octavia::LoadBalancer
    properties:
      vip_subnet: { get_param: vip_subnet_id }

outputs:
  loadbalancer_id:
    description: Load Balancer ID
    value: { get_resource: loadbalancer }

  loadbalancer_vip:
    description: IP address of the load balancer's VIP port
    value: { get_attr: [ loadbalancer, vip_address ] }
|
37
tobiko/openstack/stacks/octavia/member.yaml
Normal file
37
tobiko/openstack/stacks/octavia/member.yaml
Normal file
@ -0,0 +1,37 @@
|
||||
heat_template_version: 2015-10-15

description: A Load Balancer Member

parameters:
  pool_id:
    type: string
    description: The ID of the load balancer's pool

  member_address:
    type: string
    description: The IP address of the member

  subnet_id:
    type: string
    description: the ID of the subnet used by member
    constraints:
      - custom_constraint: neutron.subnet

  application_port:
    type: number
    default: 80
    description: The port number of the member's application

resources:
  # Registers the given address/port as a member of the pool
  member:
    type: OS::Octavia::PoolMember
    properties:
      pool: { get_param: pool_id }
      address: { get_param: member_address }
      subnet: { get_param: subnet_id }
      protocol_port: { get_param: application_port }

outputs:
  member_id:
    description: member ID
    value: { get_resource: member }
|
0
tobiko/tests/scenario/octavia/__init__.py
Normal file
0
tobiko/tests/scenario/octavia/__init__.py
Normal file
223
tobiko/tests/scenario/octavia/test_traffic.py
Normal file
223
tobiko/tests/scenario/octavia/test_traffic.py
Normal file
@ -0,0 +1,223 @@
|
||||
# Copyright (c) 2019 Red Hat
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from __future__ import absolute_import
|
||||
|
||||
import time
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
import tobiko
|
||||
from tobiko import config
|
||||
from tobiko.openstack import keystone
|
||||
from tobiko.openstack import octavia
|
||||
from tobiko.openstack import stacks
|
||||
from tobiko.shell import ssh
|
||||
from tobiko.shell import sh
|
||||
from tobiko.tests import base
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
CURL_OPTIONS = "-f --connect-timeout 2 -g"
|
||||
|
||||
|
||||
class OctaviaOtherServerStackFixture(
        stacks.OctaviaServerStackFixture):
    """Second member server, kept distinct from the default server fixture."""
    pass
|
||||
|
||||
|
||||
class OctaviaOtherMemberServerStackFixture(
        stacks.OctaviaMemberServerStackFixture):
    """Second pool member, backed by the second server fixture above."""
    server_stack = tobiko.required_setup_fixture(
        OctaviaOtherServerStackFixture)
|
||||
|
||||
|
||||
class RequestException(tobiko.TobikoException):
    # Raised by _request when the curl command run on the client VM fails
    message = ("Error while sending request to server "
               "(command was '{command}'): {error}")
|
||||
|
||||
|
||||
class TimeoutException(tobiko.TobikoException):
    # Raised when a polled resource/request does not succeed in time
    message = "Timeout exception: {reason}"
|
||||
|
||||
|
||||
@keystone.skip_if_missing_service(name='octavia')
class OctaviaBasicTrafficScenarioTest(base.TobikoTest):
    """Octavia traffic scenario test.

    Create a load balancer with 2 members that run a server application,
    Create a client that is connected to the load balancer VIP port,
    Generate network traffic from the client to the load balancer.
    """
    # Heat stack fixtures: load balancer, HTTP listener/pool, two member
    # servers and a client VM attached to the VIP network
    loadbalancer_stack = tobiko.required_setup_fixture(
        stacks.OctaviaLoadbalancerStackFixture)

    listener_stack = tobiko.required_setup_fixture(
        stacks.OctaviaListenerStackFixture)

    member1_stack = tobiko.required_setup_fixture(
        stacks.OctaviaMemberServerStackFixture)

    member2_stack = tobiko.required_setup_fixture(
        OctaviaOtherMemberServerStackFixture)

    client_stack = tobiko.required_setup_fixture(
        stacks.OctaviaClientServerStackFixture)

    # Number of distinct backend servers expected to reply behind the VIP
    members_count = 2

    def setUp(self):
        super(OctaviaBasicTrafficScenarioTest, self).setUp()

        # Wait for members
        self._check_member(self.member1_stack)
        self._check_member(self.member2_stack)

        # Check if load balancer is functional
        self._check_loadbalancer()

    def _request(self, client_stack, server_ip_address, protocol, server_port):
        """Perform a request on a server.

        Returns the response in case of success, throws a RequestException
        otherwise.
        """
        if ':' in server_ip_address:
            # Add square brackets around IPv6 address to please curl
            server_ip_address = "[{}]".format(server_ip_address)
        cmd = "curl {} {}://{}:{}/id".format(
            CURL_OPTIONS, protocol.lower(), server_ip_address, server_port)

        # The request is issued from the client VM (reached over SSH through
        # its floating IP), not from the test runner host
        ssh_client = ssh.ssh_client(
            client_stack.floating_ip_address,
            username=client_stack.image_fixture.username)

        ret = sh.ssh_execute(ssh_client, cmd)
        if ret.exit_status != 0:
            raise RequestException(command=cmd,
                                   error=ret.stderr)

        return ret.stdout

    def _wait_resource_operating_status(self, resource_type, operating_status,
                                        resource_get, *args):
        """Poll resource_get(*args) until it reports operating_status.

        Raises TimeoutException after CONF.tobiko.octavia.check_timeout
        seconds, polling every CONF.tobiko.octavia.check_interval seconds.
        """
        start = time.time()

        while time.time() - start < CONF.tobiko.octavia.check_timeout:
            res = resource_get(*args)
            if res['operating_status'] == operating_status:
                return

            time.sleep(CONF.tobiko.octavia.check_interval)

        raise TimeoutException(
            reason=("Cannot get operating_status '{}' from {} {} "
                    "within the timeout period.".format(
                        operating_status, resource_type, args)))

    def _wait_lb_operating_status(self, lb_id, operating_status):
        """Wait until the load balancer reaches the given operating_status."""
        LOG.debug("Wait for loadbalancer {} to have '{}' "
                  "operating_status".format(lb_id, operating_status))
        self._wait_resource_operating_status("loadbalancer",
                                             operating_status,
                                             octavia.get_loadbalancer,
                                             lb_id)

    def _wait_for_request_data(self, client_stack, server_ip_address,
                               server_protocol, server_port):
        """Wait until a request on a server succeeds

        Throws a TimeoutException after CONF.tobiko.octavia.check_timeout
        if the server doesn't reply.
        """
        start = time.time()

        while time.time() - start < CONF.tobiko.octavia.check_timeout:
            try:
                ret = self._request(client_stack, server_ip_address,
                                    server_protocol, server_port)
            except Exception as e:
                # Retry on any failure: the server application may not be
                # up yet right after the stacks were created
                LOG.warning("Received exception {} while performing a "
                            "request".format(e))
            else:
                return ret
            time.sleep(CONF.tobiko.octavia.check_interval)

        raise TimeoutException(
            reason=("Cannot get data from {} on port {} with "
                    "protocol {} within the timeout period.".format(
                        server_ip_address, server_port,
                        server_protocol)))

    def _check_loadbalancer(self):
        """Wait until the load balancer is functional."""

        # Check load balancer status
        loadbalancer_id = self.loadbalancer_stack.loadbalancer_id
        self._wait_lb_operating_status(loadbalancer_id, 'ONLINE')

        loadbalancer_vip = self.loadbalancer_stack.loadbalancer_vip
        loadbalancer_port = self.listener_stack.lb_port
        loadbalancer_protocol = self.listener_stack.lb_protocol

        # Then make sure the VIP actually serves requests
        self._wait_for_request_data(self.client_stack,
                                    loadbalancer_vip,
                                    loadbalancer_protocol,
                                    loadbalancer_port)

    def _check_member(self, member_stack):
        """Wait until a member server is functional."""

        # Members are reached directly (via their floating IP), bypassing
        # the load balancer
        member_ip = member_stack.server_stack.floating_ip_address
        member_port = member_stack.application_port
        member_protocol = self.listener_stack.pool_protocol

        self._wait_for_request_data(self.client_stack, member_ip,
                                    member_protocol, member_port)

    def _check_members_balanced(self):
        """Check if traffic is properly balanced between members."""
        # Maps each distinct response body (the member's hostname) to the
        # number of times it was received
        replies = {}

        loadbalancer_vip = self.loadbalancer_stack.loadbalancer_vip
        loadbalancer_port = self.listener_stack.lb_port
        loadbalancer_protocol = self.listener_stack.lb_protocol

        for _ in range(20):
            content = self._request(self.client_stack, loadbalancer_vip,
                                    loadbalancer_protocol, loadbalancer_port)

            if content not in replies:
                replies[content] = 0
            replies[content] += 1

            # wait one second (required when using cirros' nc fake webserver)
            time.sleep(1)

        LOG.debug("Replies from load balancer: {}".format(
            replies))

        # assert that 'members_count' servers replied
        self.assertEqual(len(replies), self.members_count)

        if self.listener_stack.lb_algorithm == 'ROUND_ROBIN':
            # assert that requests have been fairly dispatched (each server
            # received the same number of requests)
            self.assertEqual(len(set(replies.values())), 1)

    def test_traffic(self):
        """End-to-end check: members up, LB online, traffic balanced."""
        self._check_members_balanced()
|
Loading…
Reference in New Issue
Block a user