You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
4504 lines
223 KiB
4504 lines
223 KiB
# Copyright 2014 Rackspace |
|
# |
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may |
|
# not use this file except in compliance with the License. You may obtain |
|
# a copy of the License at |
|
# |
|
# http://www.apache.org/licenses/LICENSE-2.0 |
|
# |
|
# Unless required by applicable law or agreed to in writing, software |
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
|
# License for the specific language governing permissions and limitations |
|
# under the License. |
|
|
|
import copy |
|
import datetime |
|
import random |
|
from unittest import mock |
|
|
|
from oslo_config import cfg |
|
from oslo_config import fixture as oslo_fixture |
|
from oslo_db import exception as db_exception |
|
from oslo_utils import uuidutils |
|
from sqlalchemy.orm import defer |
|
from sqlalchemy.orm import exc as sa_exception |
|
|
|
from octavia.common import constants |
|
from octavia.common import data_models as models |
|
from octavia.common import exceptions |
|
from octavia.db import api as db_api |
|
from octavia.db import models as db_models |
|
from octavia.db import repositories as repo |
|
from octavia.tests.functional.db import base |
|
|
|
CONF = cfg.CONF |
|
|
|
|
|
class BaseRepositoryTest(base.OctaviaDBTestBase): |
|
|
|
FAKE_IP = "192.0.2.1" |
|
FAKE_UUID_1 = uuidutils.generate_uuid() |
|
FAKE_UUID_2 = uuidutils.generate_uuid() |
|
FAKE_UUID_3 = uuidutils.generate_uuid() |
|
FAKE_UUID_4 = uuidutils.generate_uuid() |
|
FAKE_UUID_5 = uuidutils.generate_uuid() |
|
FAKE_UUID_6 = uuidutils.generate_uuid() |
|
FAKE_UUID_7 = uuidutils.generate_uuid() |
|
FAKE_EXP_AGE = 10 |
|
|
|
def setUp(self): |
|
super(BaseRepositoryTest, self).setUp() |
|
self.pool_repo = repo.PoolRepository() |
|
self.member_repo = repo.MemberRepository() |
|
self.lb_repo = repo.LoadBalancerRepository() |
|
self.vip_repo = repo.VipRepository() |
|
self.listener_repo = repo.ListenerRepository() |
|
self.listener_stats_repo = repo.ListenerStatisticsRepository() |
|
self.sp_repo = repo.SessionPersistenceRepository() |
|
self.hm_repo = repo.HealthMonitorRepository() |
|
self.sni_repo = repo.SNIRepository() |
|
self.amphora_repo = repo.AmphoraRepository() |
|
self.amphora_health_repo = repo.AmphoraHealthRepository() |
|
self.vrrp_group_repo = repo.VRRPGroupRepository() |
|
self.l7policy_repo = repo.L7PolicyRepository() |
|
self.l7rule_repo = repo.L7RuleRepository() |
|
self.quota_repo = repo.QuotasRepository() |
|
self.flavor_repo = repo.FlavorRepository() |
|
self.flavor_profile_repo = repo.FlavorProfileRepository() |
|
|
|
def test_get_all_return_value(self): |
|
pool_list, _ = self.pool_repo.get_all(self.session, |
|
project_id=self.FAKE_UUID_2) |
|
self.assertIsInstance(pool_list, list) |
|
lb_list, _ = self.lb_repo.get_all(self.session, |
|
project_id=self.FAKE_UUID_2) |
|
self.assertIsInstance(lb_list, list) |
|
listener_list, _ = self.listener_repo.get_all( |
|
self.session, project_id=self.FAKE_UUID_2) |
|
self.assertIsInstance(listener_list, list) |
|
member_list, _ = self.member_repo.get_all(self.session, |
|
project_id=self.FAKE_UUID_2) |
|
self.assertIsInstance(member_list, list) |
|
fp_list, _ = self.flavor_profile_repo.get_all( |
|
self.session, id=self.FAKE_UUID_2) |
|
self.assertIsInstance(fp_list, list) |
|
flavor_list, _ = self.flavor_repo.get_all( |
|
self.session, id=self.FAKE_UUID_2) |
|
self.assertIsInstance(flavor_list, list) |
|
|
|
|
|
class AllRepositoriesTest(base.OctaviaDBTestBase): |
|
|
|
FAKE_UUID_1 = uuidutils.generate_uuid() |
|
FAKE_UUID_2 = uuidutils.generate_uuid() |
|
FAKE_UUID_3 = uuidutils.generate_uuid() |
|
FAKE_IP = '192.0.2.44' |
|
|
|
def setUp(self): |
|
super(AllRepositoriesTest, self).setUp() |
|
self.repos = repo.Repositories() |
|
self.load_balancer = self.repos.load_balancer.create( |
|
self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, |
|
name="lb_name", description="lb_description", |
|
provisioning_status=constants.ACTIVE, |
|
operating_status=constants.ONLINE, enabled=True) |
|
self.listener = self.repos.listener.create( |
|
self.session, protocol=constants.PROTOCOL_HTTP, protocol_port=80, |
|
enabled=True, provisioning_status=constants.ACTIVE, |
|
operating_status=constants.ONLINE, |
|
load_balancer_id=self.load_balancer.id) |
|
self.amphora = self.repos.amphora.create( |
|
self.session, id=uuidutils.generate_uuid(), |
|
load_balancer_id=self.load_balancer.id, |
|
compute_id=self.FAKE_UUID_3, status=constants.ACTIVE, |
|
vrrp_ip=self.FAKE_IP, lb_network_ip=self.FAKE_IP) |
|
|
|
def test_all_repos_has_correct_repos(self): |
|
repo_attr_names = ('load_balancer', 'vip', 'health_monitor', |
|
'session_persistence', 'pool', 'member', 'listener', |
|
'listener_stats', 'amphora', 'sni', |
|
'amphorahealth', 'vrrpgroup', 'l7rule', 'l7policy', |
|
'amp_build_slots', 'amp_build_req', 'quotas', |
|
'flavor', 'flavor_profile', 'spares_pool', |
|
'listener_cidr', 'availability_zone', |
|
'availability_zone_profile') |
|
for repo_attr in repo_attr_names: |
|
single_repo = getattr(self.repos, repo_attr, None) |
|
message = ("Class Repositories should have %s instance" |
|
" variable.") % repo_attr |
|
self.assertIsNotNone(single_repo, message=message) |
|
message = (("instance variable, %(repo_name)s, of class " |
|
"Repositories should be an instance of %(base)s") % |
|
{'repo_name': repo_attr, |
|
'base': repo.BaseRepository.__name__}) |
|
self.assertIsInstance(single_repo, repo.BaseRepository, |
|
msg=message) |
|
|
|
for attr in vars(self.repos): |
|
if attr.startswith('_') or attr in repo_attr_names: |
|
continue |
|
possible_repo = getattr(self.repos, attr, None) |
|
message = ('Class Repositories is not expected to have %s instance' |
|
' variable as a repository.' % attr) |
|
self.assertNotIsInstance(possible_repo, repo.BaseRepository, |
|
msg=message) |
|
|
|
def test_create_load_balancer_and_vip(self): |
|
lb = {'name': 'test1', 'description': 'desc1', 'enabled': True, |
|
'provisioning_status': constants.PENDING_UPDATE, |
|
'operating_status': constants.OFFLINE, |
|
'topology': constants.TOPOLOGY_ACTIVE_STANDBY, |
|
'vrrp_group': None, |
|
'provider': 'amphora', |
|
'server_group_id': uuidutils.generate_uuid(), |
|
'project_id': uuidutils.generate_uuid(), |
|
'id': uuidutils.generate_uuid(), 'flavor_id': None, |
|
'tags': ['test_tag']} |
|
vip = {'ip_address': '192.0.2.1', |
|
'port_id': uuidutils.generate_uuid(), |
|
'subnet_id': uuidutils.generate_uuid(), |
|
'network_id': uuidutils.generate_uuid(), |
|
'qos_policy_id': None, 'octavia_owned': True} |
|
lb_dm = self.repos.create_load_balancer_and_vip(self.session, lb, vip) |
|
lb_dm_dict = lb_dm.to_dict() |
|
del lb_dm_dict['vip'] |
|
del lb_dm_dict['listeners'] |
|
del lb_dm_dict['amphorae'] |
|
del lb_dm_dict['pools'] |
|
del lb_dm_dict['created_at'] |
|
del lb_dm_dict['updated_at'] |
|
self.assertIsNone(lb_dm_dict.pop('availability_zone')) |
|
self.assertEqual(lb, lb_dm_dict) |
|
vip_dm_dict = lb_dm.vip.to_dict() |
|
vip_dm_dict['load_balancer_id'] = lb_dm.id |
|
del vip_dm_dict['load_balancer'] |
|
self.assertEqual(vip, vip_dm_dict) |
|
|
|
def test_create_pool_on_listener_without_sp(self): |
|
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'project_id': uuidutils.generate_uuid(), |
|
'id': uuidutils.generate_uuid(), |
|
'provisioning_status': constants.ACTIVE, |
|
'tags': ['test_tag'], |
|
'tls_certificate_id': uuidutils.generate_uuid(), |
|
'tls_enabled': False, 'tls_ciphers': None} |
|
pool_dm = self.repos.create_pool_on_load_balancer( |
|
self.session, pool, listener_id=self.listener.id) |
|
pool_dm_dict = pool_dm.to_dict() |
|
# These are not defiend in the sample pool dict but will |
|
# be in the live data. |
|
del pool_dm_dict['members'] |
|
del pool_dm_dict['health_monitor'] |
|
del pool_dm_dict['session_persistence'] |
|
del pool_dm_dict['listeners'] |
|
del pool_dm_dict['load_balancer'] |
|
del pool_dm_dict['load_balancer_id'] |
|
del pool_dm_dict['l7policies'] |
|
del pool_dm_dict['created_at'] |
|
del pool_dm_dict['updated_at'] |
|
del pool_dm_dict['ca_tls_certificate_id'] |
|
del pool_dm_dict['crl_container_id'] |
|
self.assertEqual(pool, pool_dm_dict) |
|
new_listener = self.repos.listener.get(self.session, |
|
id=self.listener.id) |
|
self.assertEqual(pool_dm.id, new_listener.default_pool_id) |
|
|
|
def test_create_pool_on_listener_with_sp(self): |
|
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'project_id': uuidutils.generate_uuid(), |
|
'id': uuidutils.generate_uuid(), |
|
'provisioning_status': constants.ACTIVE, |
|
'tags': ['test_tag'], |
|
'tls_certificate_id': uuidutils.generate_uuid(), |
|
'tls_enabled': False, |
|
'tls_ciphers': None} |
|
sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, |
|
'cookie_name': 'cookie_monster', |
|
'pool_id': pool['id'], |
|
'persistence_granularity': None, |
|
'persistence_timeout': None} |
|
pool.update({'session_persistence': sp}) |
|
pool_dm = self.repos.create_pool_on_load_balancer( |
|
self.session, pool, listener_id=self.listener.id) |
|
pool_dm_dict = pool_dm.to_dict() |
|
# These are not defiend in the sample pool dict but will |
|
# be in the live data. |
|
del pool_dm_dict['members'] |
|
del pool_dm_dict['health_monitor'] |
|
del pool_dm_dict['session_persistence'] |
|
del pool_dm_dict['listeners'] |
|
del pool_dm_dict['load_balancer'] |
|
del pool_dm_dict['load_balancer_id'] |
|
del pool_dm_dict['l7policies'] |
|
del pool_dm_dict['created_at'] |
|
del pool_dm_dict['updated_at'] |
|
del pool_dm_dict['ca_tls_certificate_id'] |
|
del pool_dm_dict['crl_container_id'] |
|
self.assertEqual(pool, pool_dm_dict) |
|
sp_dm_dict = pool_dm.session_persistence.to_dict() |
|
del sp_dm_dict['pool'] |
|
sp['pool_id'] = pool_dm.id |
|
self.assertEqual(sp, sp_dm_dict) |
|
new_listener = self.repos.listener.get(self.session, |
|
id=self.listener.id) |
|
self.assertEqual(pool_dm.id, new_listener.default_pool_id) |
|
new_sp = self.repos.session_persistence.get(self.session, |
|
pool_id=pool_dm.id) |
|
self.assertIsNotNone(new_sp) |
|
|
|
def test_update_pool_without_sp(self): |
|
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'project_id': uuidutils.generate_uuid(), |
|
'id': uuidutils.generate_uuid(), |
|
'provisioning_status': constants.ACTIVE, |
|
'tags': ['test_tag'], 'tls_enabled': False, |
|
'tls_ciphers': None} |
|
pool_dm = self.repos.create_pool_on_load_balancer( |
|
self.session, pool, listener_id=self.listener.id) |
|
update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'} |
|
new_pool_dm = self.repos.update_pool_and_sp( |
|
self.session, pool_dm.id, update_pool) |
|
pool_dm_dict = new_pool_dm.to_dict() |
|
# These are not defiend in the sample pool dict but will |
|
# be in the live data. |
|
del pool_dm_dict['members'] |
|
del pool_dm_dict['health_monitor'] |
|
del pool_dm_dict['session_persistence'] |
|
del pool_dm_dict['listeners'] |
|
del pool_dm_dict['load_balancer'] |
|
del pool_dm_dict['load_balancer_id'] |
|
del pool_dm_dict['l7policies'] |
|
del pool_dm_dict['created_at'] |
|
del pool_dm_dict['updated_at'] |
|
del pool_dm_dict['ca_tls_certificate_id'] |
|
del pool_dm_dict['crl_container_id'] |
|
pool.update(update_pool) |
|
pool['tls_certificate_id'] = None |
|
self.assertEqual(pool, pool_dm_dict) |
|
self.assertIsNone(new_pool_dm.session_persistence) |
|
|
|
def test_update_pool_with_existing_sp(self): |
|
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'project_id': uuidutils.generate_uuid(), |
|
'id': uuidutils.generate_uuid(), |
|
'provisioning_status': constants.ACTIVE, |
|
'tags': ['test_tag'], |
|
'tls_certificate_id': uuidutils.generate_uuid(), |
|
'tls_enabled': False, 'tls_ciphers': None} |
|
sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, |
|
'cookie_name': 'cookie_monster', |
|
'pool_id': pool['id'], |
|
'persistence_granularity': None, |
|
'persistence_timeout': None} |
|
pool.update({'session_persistence': sp}) |
|
pool_dm = self.repos.create_pool_on_load_balancer( |
|
self.session, pool, listener_id=self.listener.id) |
|
update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'} |
|
update_sp = {'type': constants.SESSION_PERSISTENCE_SOURCE_IP} |
|
update_pool.update({'session_persistence': update_sp}) |
|
new_pool_dm = self.repos.update_pool_and_sp( |
|
self.session, pool_dm.id, update_pool) |
|
pool_dm_dict = new_pool_dm.to_dict() |
|
# These are not defiend in the sample pool dict but will |
|
# be in the live data. |
|
del pool_dm_dict['members'] |
|
del pool_dm_dict['health_monitor'] |
|
del pool_dm_dict['session_persistence'] |
|
del pool_dm_dict['listeners'] |
|
del pool_dm_dict['load_balancer'] |
|
del pool_dm_dict['load_balancer_id'] |
|
del pool_dm_dict['l7policies'] |
|
del pool_dm_dict['created_at'] |
|
del pool_dm_dict['updated_at'] |
|
del pool_dm_dict['ca_tls_certificate_id'] |
|
del pool_dm_dict['crl_container_id'] |
|
pool.update(update_pool) |
|
self.assertEqual(pool, pool_dm_dict) |
|
sp_dm_dict = new_pool_dm.session_persistence.to_dict() |
|
del sp_dm_dict['pool'] |
|
sp['pool_id'] = pool_dm.id |
|
sp.update(update_sp) |
|
self.assertEqual(sp, sp_dm_dict) |
|
|
|
def test_update_pool_with_nonexisting_sp(self): |
|
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'provisioning_status': constants.ACTIVE, |
|
'project_id': uuidutils.generate_uuid(), |
|
'id': uuidutils.generate_uuid()} |
|
pool_dm = self.repos.create_pool_on_load_balancer( |
|
self.session, pool, listener_id=self.listener.id) |
|
update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'} |
|
update_sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, |
|
'cookie_name': 'monster_cookie', |
|
'persistence_granularity': None, |
|
'persistence_timeout': None} |
|
update_pool.update({'session_persistence': update_sp}) |
|
new_pool_dm = self.repos.update_pool_and_sp( |
|
self.session, pool_dm.id, update_pool) |
|
sp_dm_dict = new_pool_dm.session_persistence.to_dict() |
|
del sp_dm_dict['pool'] |
|
update_sp['pool_id'] = pool_dm.id |
|
update_sp.update(update_sp) |
|
self.assertEqual(update_sp, sp_dm_dict) |
|
|
|
def test_update_pool_with_nonexisting_sp_delete_sp(self): |
|
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'provisioning_status': constants.ACTIVE, |
|
'project_id': uuidutils.generate_uuid(), |
|
'id': uuidutils.generate_uuid()} |
|
pool_dm = self.repos.create_pool_on_load_balancer( |
|
self.session, pool, listener_id=self.listener.id) |
|
update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool', |
|
'session_persistence': None} |
|
new_pool_dm = self.repos.update_pool_and_sp( |
|
self.session, pool_dm.id, update_pool) |
|
self.assertIsNone(new_pool_dm.session_persistence) |
|
|
|
def test_update_pool_with_existing_sp_delete_sp(self): |
|
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'project_id': uuidutils.generate_uuid(), |
|
'id': uuidutils.generate_uuid()} |
|
sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, |
|
'cookie_name': 'cookie_monster', |
|
'pool_id': pool['id']} |
|
pool.update({'session_persistence': sp}) |
|
pool_dm = self.repos.create_pool_on_load_balancer( |
|
self.session, pool, listener_id=self.listener.id) |
|
update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool', |
|
'session_persistence': {}} |
|
new_pool_dm = self.repos.update_pool_and_sp( |
|
self.session, pool_dm.id, update_pool) |
|
self.assertIsNone(new_pool_dm.session_persistence) |
|
|
|
def test_update_pool_with_cert(self): |
|
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'project_id': uuidutils.generate_uuid(), |
|
'id': uuidutils.generate_uuid(), |
|
'provisioning_status': constants.ACTIVE, |
|
'tls_enabled': False, 'tls_ciphers': None} |
|
pool_dm = self.repos.create_pool_on_load_balancer( |
|
self.session, pool, listener_id=self.listener.id) |
|
update_pool = {'tls_certificate_id': uuidutils.generate_uuid()} |
|
new_pool_dm = self.repos.update_pool_and_sp( |
|
self.session, pool_dm.id, update_pool) |
|
pool_dm_dict = new_pool_dm.to_dict() |
|
# These are not defiend in the sample pool dict but will |
|
# be in the live data. |
|
del pool_dm_dict['members'] |
|
del pool_dm_dict['health_monitor'] |
|
del pool_dm_dict['session_persistence'] |
|
del pool_dm_dict['listeners'] |
|
del pool_dm_dict['load_balancer'] |
|
del pool_dm_dict['load_balancer_id'] |
|
del pool_dm_dict['l7policies'] |
|
del pool_dm_dict['created_at'] |
|
del pool_dm_dict['updated_at'] |
|
del pool_dm_dict['tags'] |
|
del pool_dm_dict['ca_tls_certificate_id'] |
|
del pool_dm_dict['crl_container_id'] |
|
pool.update(update_pool) |
|
self.assertEqual(pool, pool_dm_dict) |
|
|
|
def test_create_load_balancer_tree(self): |
|
self.skipTest("SLQAlchemy/PySqlite transaction handling is broken. " |
|
"Version 1.3.16 of sqlachemy changes how sqlite3 " |
|
"transactions are handled and this test fails as " |
|
"The LB created early in this process now disappears " |
|
"from the transaction context.") |
|
project_id = uuidutils.generate_uuid() |
|
member = {'project_id': project_id, 'ip_address': '11.0.0.1', |
|
'protocol_port': 80, 'enabled': True, 'backup': False, |
|
'operating_status': constants.ONLINE, |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'id': uuidutils.generate_uuid()} |
|
health_monitor = {'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, |
|
'timeout': 1, 'fall_threshold': 1, |
|
'rise_threshold': 1, 'enabled': True, |
|
'operating_status': constants.OFFLINE, |
|
'provisioning_status': constants.PENDING_CREATE} |
|
sp = {'type': constants.SESSION_PERSISTENCE_APP_COOKIE, |
|
'cookie_name': 'cookie_name'} |
|
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', 'listener_id': None, |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'project_id': project_id, 'members': [member], |
|
'health_monitor': health_monitor, 'session_persistence': sp, |
|
'id': uuidutils.generate_uuid()} |
|
sp['pool_id'] = pool.get('id') |
|
member['pool_id'] = pool.get('id') |
|
health_monitor['pool_id'] = pool.get('id') |
|
l7rule = {'type': constants.L7RULE_TYPE_HOST_NAME, |
|
'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, |
|
'operating_status': constants.ONLINE, |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'value': 'localhost', |
|
'enabled': True} |
|
r_health_monitor = {'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, |
|
'timeout': 1, 'fall_threshold': 1, |
|
'rise_threshold': 1, 'enabled': True, |
|
'operating_status': constants.OFFLINE, |
|
'provisioning_status': constants.PENDING_CREATE} |
|
redirect_pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', 'project_id': project_id, |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'id': uuidutils.generate_uuid(), |
|
'health_monitor': r_health_monitor} |
|
l7policy = {'name': 'l7policy1', 'enabled': True, |
|
'description': 'l7policy_description', 'position': 1, |
|
'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, |
|
'redirect_pool': redirect_pool, 'l7rules': [l7rule], |
|
'redirect_pool_id': redirect_pool.get('id'), |
|
'id': uuidutils.generate_uuid(), |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'operating_status': constants.ONLINE} |
|
l7rule['l7policy_id'] = l7policy.get('id') |
|
listener = {'project_id': project_id, 'name': 'listener1', |
|
'description': 'listener_description', |
|
'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, |
|
'connection_limit': 1, 'enabled': True, |
|
'default_pool': pool, 'l7policies': [l7policy], |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid()} |
|
l7policy['listener_id'] = listener.get('id') |
|
vip = {'ip_address': '192.0.2.1', 'port_id': uuidutils.generate_uuid(), |
|
'subnet_id': uuidutils.generate_uuid()} |
|
lb = {'name': 'lb1', 'description': 'desc1', 'enabled': True, |
|
'topology': constants.TOPOLOGY_ACTIVE_STANDBY, |
|
'vrrp_group': None, 'server_group_id': uuidutils.generate_uuid(), |
|
'project_id': project_id, 'vip': vip, |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid(), 'listeners': [listener]} |
|
listener['load_balancer_id'] = lb.get('id') |
|
pool['load_balancer_id'] = lb.get('id') |
|
redirect_pool['load_balancer_id'] = lb.get('id') |
|
lock_session = db_api.get_session(autocommit=False) |
|
db_lb = self.repos.create_load_balancer_tree(self.session, |
|
lock_session, lb) |
|
self.assertIsNotNone(db_lb) |
|
self.assertIsInstance(db_lb, models.LoadBalancer) |
|
|
|
def test_sqlite_transactions_broken(self): |
|
self.skipTest("SLQAlchemy/PySqlite transaction handling is broken. " |
|
"Version 1.3.16 of sqlachemy changes how sqlite3 " |
|
"transactions are handled and this test fails as " |
|
"The LB created early in this process now disappears " |
|
"from the transaction context.") |
|
"""This test is a canary for pysqlite fixing transaction handling. |
|
|
|
When this test starts failing, we can fix and un-skip the deadlock |
|
test below: `test_create_load_balancer_tree_quotas`. |
|
""" |
|
project_id = uuidutils.generate_uuid() |
|
vip = {'ip_address': '192.0.2.1', 'port_id': uuidutils.generate_uuid(), |
|
'subnet_id': uuidutils.generate_uuid()} |
|
lb = {'name': 'lb1', 'description': 'desc1', 'enabled': True, |
|
'topology': constants.TOPOLOGY_ACTIVE_STANDBY, |
|
'vrrp_group': None, 'server_group_id': uuidutils.generate_uuid(), |
|
'project_id': project_id, |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid()} |
|
|
|
session = db_api.get_session() |
|
lock_session = db_api.get_session(autocommit=False) |
|
lbs = lock_session.query(db_models.LoadBalancer).filter_by( |
|
project_id=project_id).all() |
|
self.assertEqual(0, len(lbs)) # Initially: 0 |
|
self.repos.create_load_balancer_and_vip(lock_session, lb, vip) |
|
lbs = lock_session.query(db_models.LoadBalancer).filter_by( |
|
project_id=project_id).all() |
|
self.assertEqual(1, len(lbs)) # After create: 1 |
|
lock_session.rollback() |
|
lbs = lock_session.query(db_models.LoadBalancer).filter_by( |
|
project_id=project_id).all() |
|
self.assertEqual(0, len(lbs)) # After rollback: 0 |
|
self.repos.create_load_balancer_and_vip(lock_session, lb, vip) |
|
lbs = lock_session.query(db_models.LoadBalancer).filter_by( |
|
project_id=project_id).all() |
|
self.assertEqual(1, len(lbs)) # After create: 1 |
|
lock_session.rollback() |
|
lbs = lock_session.query(db_models.LoadBalancer).filter_by( |
|
project_id=project_id).all() |
|
self.assertEqual(0, len(lbs)) # After rollback: 0 |
|
# Force a count(), which breaks transaction integrity in pysqlite |
|
session.query(db_models.LoadBalancer).filter( |
|
db_models.LoadBalancer.project_id == project_id).count() |
|
self.repos.create_load_balancer_and_vip(lock_session, lb, vip) |
|
lbs = lock_session.query(db_models.LoadBalancer).filter_by( |
|
project_id=project_id).all() |
|
self.assertEqual(1, len(lbs)) # After create: 1 |
|
lock_session.rollback() |
|
lbs = lock_session.query(db_models.LoadBalancer).filter_by( |
|
project_id=project_id).all() |
|
self.assertEqual(1, len(lbs)) # After rollback: 1 (broken!) |
|
|
|
def test_create_load_balancer_tree_quotas(self): |
|
self.skipTest("PySqlite transaction handling is broken. We can unskip" |
|
"this when `test_sqlite_transactions_broken` fails.") |
|
conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) |
|
conf.config(group='api_settings', auth_strategy=constants.TESTING) |
|
project_id = uuidutils.generate_uuid() |
|
member = {'project_id': project_id, 'ip_address': '11.0.0.1', |
|
'protocol_port': 80, 'enabled': True, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid()} |
|
member2 = {'project_id': project_id, 'ip_address': '11.0.0.2', |
|
'protocol_port': 81, 'enabled': True, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid()} |
|
member3 = {'project_id': project_id, 'ip_address': '11.0.0.3', |
|
'protocol_port': 81, 'enabled': True, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid()} |
|
health_monitor = {'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, |
|
'timeout': 1, 'fall_threshold': 1, |
|
'rise_threshold': 1, 'enabled': True} |
|
sp = {'type': constants.SESSION_PERSISTENCE_APP_COOKIE, |
|
'cookie_name': 'cookie_name'} |
|
pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', 'listener_id': None, |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'project_id': project_id, 'members': [member], |
|
'health_monitor': health_monitor, 'session_persistence': sp, |
|
'id': uuidutils.generate_uuid()} |
|
pool2 = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool2', |
|
'description': 'desc1', 'listener_id': None, |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'project_id': project_id, 'members': [member2], |
|
'health_monitor': health_monitor, |
|
'id': uuidutils.generate_uuid()} |
|
sp['pool_id'] = pool.get('id') |
|
member['pool_id'] = pool.get('id') |
|
health_monitor['pool_id'] = pool.get('id') |
|
l7rule = {'type': constants.L7RULE_TYPE_HOST_NAME, |
|
'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, |
|
'value': 'localhost'} |
|
r_health_monitor = {'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, |
|
'timeout': 1, 'fall_threshold': 1, |
|
'rise_threshold': 1, 'enabled': True} |
|
redirect_pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', |
|
'description': 'desc1', 'project_id': project_id, |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid(), |
|
'health_monitor': r_health_monitor, |
|
'members': [member3]} |
|
l7policy = {'name': 'l7policy1', 'enabled': True, |
|
'description': 'l7policy_description', 'position': 1, |
|
'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, |
|
'redirect_pool': redirect_pool, 'l7rules': [l7rule], |
|
'redirect_pool_id': redirect_pool.get('id'), |
|
'id': uuidutils.generate_uuid()} |
|
l7rule['l7policy_id'] = l7policy.get('id') |
|
listener = {'project_id': project_id, 'name': 'listener1', |
|
'description': 'listener_description', |
|
'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, |
|
'connection_limit': 1, 'enabled': True, |
|
'default_pool': pool, 'l7policies': [l7policy], |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid()} |
|
listener2 = {'project_id': project_id, 'name': 'listener2', |
|
'description': 'listener_description', |
|
'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 83, |
|
'connection_limit': 1, 'enabled': True, |
|
'default_pool': pool2, |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid()} |
|
l7policy['listener_id'] = listener.get('id') |
|
vip = {'ip_address': '192.0.2.1', 'port_id': uuidutils.generate_uuid(), |
|
'subnet_id': uuidutils.generate_uuid()} |
|
lb = {'name': 'lb1', 'description': 'desc1', 'enabled': True, |
|
'topology': constants.TOPOLOGY_ACTIVE_STANDBY, |
|
'vrrp_group': None, 'server_group_id': uuidutils.generate_uuid(), |
|
'project_id': project_id, 'vip': vip, |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid(), 'listeners': [listener, |
|
listener2]} |
|
listener['load_balancer_id'] = lb.get('id') |
|
listener2['load_balancer_id'] = lb.get('id') |
|
pool['load_balancer_id'] = lb.get('id') |
|
redirect_pool['load_balancer_id'] = lb.get('id') |
|
|
|
lb2_health_monitor = {'type': constants.HEALTH_MONITOR_HTTP, |
|
'delay': 1, 'timeout': 1, 'fall_threshold': 1, |
|
'rise_threshold': 1, 'enabled': True} |
|
lb2_member = {'project_id': project_id, 'ip_address': '11.0.0.3', |
|
'protocol_port': 80, 'enabled': True, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid()} |
|
lb2_pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'lb2_pool', |
|
'description': 'desc1', 'listener_id': None, |
|
'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, |
|
'enabled': True, 'operating_status': constants.ONLINE, |
|
'project_id': project_id, 'members': [lb2_member], |
|
'health_monitor': lb2_health_monitor, |
|
'session_persistence': sp, |
|
'id': uuidutils.generate_uuid()} |
|
lb2_listener = {'project_id': project_id, 'name': 'lb2_listener', |
|
'description': 'listener_description', |
|
'protocol': constants.PROTOCOL_HTTP, |
|
'protocol_port': 83, 'connection_limit': 1, |
|
'enabled': True, |
|
'default_pool': lb2_pool, |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid()} |
|
lb2 = {'name': 'lb2', 'description': 'desc2', 'enabled': True, |
|
'topology': constants.TOPOLOGY_ACTIVE_STANDBY, |
|
'vrrp_group': None, |
|
'server_group_id': uuidutils.generate_uuid(), |
|
'project_id': project_id, 'vip': vip, |
|
'provisioning_status': constants.PENDING_CREATE, |
|
'operating_status': constants.ONLINE, |
|
'id': uuidutils.generate_uuid(), 'listeners': [lb2_listener]} |
|
lb2_listener['load_balancer_id'] = lb2.get('id') |
|
lb2_pool['load_balancer_id'] = lb2.get('id') |
|
|
|
# Test zero quota |
|
quota = {'load_balancer': 0, |
|
'listener': 10, |
|
'pool': 10, |
|
'health_monitor': 10, |
|
'member': 10} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb1')) |
|
|
|
quota = {'load_balancer': 10, |
|
'listener': 0, |
|
'pool': 10, |
|
'health_monitor': 10, |
|
'member': 10} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb1')) |
|
|
|
quota = {'load_balancer': 10, |
|
'listener': 10, |
|
'pool': 0, |
|
'health_monitor': 10, |
|
'member': 10} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb1')) |
|
|
|
quota = {'load_balancer': 10, |
|
'listener': 10, |
|
'pool': 10, |
|
'health_monitor': 0, |
|
'member': 10} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb1')) |
|
|
|
quota = {'load_balancer': 10, |
|
'listener': 10, |
|
'pool': 10, |
|
'health_monitor': 10, |
|
'member': 0} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb1')) |
|
|
|
# Test l7policy quota for pools |
|
quota = {'load_balancer': 10, |
|
'listener': 10, |
|
'pool': 1, |
|
'health_monitor': 10, |
|
'member': 10} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb1')) |
|
|
|
# Test l7policy quota for health monitor |
|
quota = {'load_balancer': 10, |
|
'listener': 10, |
|
'pool': 10, |
|
'health_monitor': 1, |
|
'member': 10} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb1')) |
|
|
|
# Test l7policy quota for member |
|
quota = {'load_balancer': 10, |
|
'listener': 10, |
|
'pool': 10, |
|
'health_monitor': 10, |
|
'member': 1} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb1')) |
|
|
|
# ### Test load balancer quota |
|
# Test one quota, attempt to create another |
|
quota = {'load_balancer': 1, |
|
'listener': 10, |
|
'pool': 10, |
|
'health_monitor': 10, |
|
'member': 10} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.repos.create_load_balancer_tree(self.session, lock_session, |
|
copy.deepcopy(lb)) |
|
# Check if first LB build passed quota checks |
|
self.assertIsNotNone(self.repos.load_balancer.get(self.session, |
|
name='lb1')) |
|
# Try building another LB, it should fail |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb2)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb2')) |
|
|
|
# ### Test listener quota |
|
# Create with custom quotas and limit to two listener (lb has two), |
|
# expect error of too many listeners/over quota |
|
quota = {'load_balancer': 10, |
|
'listener': 2, |
|
'pool': 10, |
|
'health_monitor': 10, |
|
'member': 10} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb2)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb2')) |
|
|
|
# ### Test pool quota |
|
# Create with custom quotas and limit to two pools (lb has two), |
|
# expect error of too many pool/over quota |
|
quota = {'load_balancer': 10, |
|
'listener': 10, |
|
'pool': 2, |
|
'health_monitor': 10, |
|
'member': 10} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb2)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb2')) |
|
|
|
# ### Test health monitor quota |
|
# Create with custom quotas and limit to one health monitor, |
|
# expect error of too many health monitor/over quota |
|
quota = {'load_balancer': 10, |
|
'listener': 10, |
|
'pool': 10, |
|
'health_monitor': 1, |
|
'member': 10} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb2)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb2')) |
|
|
|
# ### Test member quota |
|
# Create with custom quotas and limit to two member (lb has two), |
|
# expect error of too many member/over quota |
|
quota = {'load_balancer': 10, |
|
'listener': 10, |
|
'pool': 10, |
|
'health_monitor': 10, |
|
'member': 2} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
lock_session = db_api.get_session(autocommit=False) |
|
self.assertRaises( |
|
exceptions.QuotaException, |
|
self.repos.create_load_balancer_tree, |
|
self.session, lock_session, copy.deepcopy(lb2)) |
|
# Make sure we didn't create the load balancer anyway |
|
self.assertIsNone(self.repos.load_balancer.get(self.session, |
|
name='lb2')) |
|
|
|
def test_check_quota_met(self): |
|
|
|
project_id = uuidutils.generate_uuid() |
|
|
|
# Test auth_strategy == NOAUTH |
|
conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) |
|
conf.config(group='api_settings', auth_strategy=constants.NOAUTH) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
conf.config(group='api_settings', auth_strategy=constants.TESTING) |
|
|
|
# Test check for missing project_id |
|
self.assertRaises(exceptions.MissingProjectID, |
|
self.repos.check_quota_met, |
|
self.session, self.session, |
|
models.LoadBalancer, None) |
|
|
|
# Test non-quota object |
|
project_id = uuidutils.generate_uuid() |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.SessionPersistence, |
|
project_id)) |
|
# Test DB deadlock case |
|
project_id = uuidutils.generate_uuid() |
|
mock_session = mock.MagicMock() |
|
mock_session.query = mock.MagicMock( |
|
side_effect=db_exception.DBDeadlock) |
|
self.assertRaises(exceptions.ProjectBusyException, |
|
self.repos.check_quota_met, |
|
self.session, mock_session, |
|
models.LoadBalancer, project_id) |
|
|
|
# ### Test load balancer quota |
|
# Test with no pre-existing quota record default 0 |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_load_balancer_quota=0) |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
self.assertIsNone(self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_load_balancer) |
|
|
|
# Test with no pre-existing quota record default 1 |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_load_balancer_quota=1) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_load_balancer) |
|
# Test above project is now at quota |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_load_balancer) |
|
|
|
# Test with no pre-existing quota record default unlimited |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', |
|
default_load_balancer_quota=constants.QUOTA_UNLIMITED) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_load_balancer) |
|
# Test above project adding another load balancer |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
self.assertEqual(2, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_load_balancer) |
|
|
|
# Test upgrade case with pre-quota load balancers |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_load_balancer_quota=1) |
|
lb = self.repos.load_balancer.create( |
|
self.session, id=uuidutils.generate_uuid(), |
|
project_id=project_id, name="lb_name", |
|
description="lb_description", |
|
provisioning_status=constants.ACTIVE, |
|
operating_status=constants.ONLINE, |
|
enabled=True) |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
|
|
# Test upgrade case with pre-quota deleted load balancers |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_load_balancer_quota=1) |
|
lb = self.repos.load_balancer.create( |
|
self.session, id=uuidutils.generate_uuid(), |
|
project_id=project_id, name="lb_name", |
|
description="lb_description", |
|
provisioning_status=constants.DELETED, |
|
operating_status=constants.ONLINE, |
|
enabled=True) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_load_balancer) |
|
|
|
# Test pre-existing quota with quota of zero |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_load_balancer_quota=10) |
|
quota = {'load_balancer': 0} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
|
|
# Test pre-existing quota with quota of one |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_load_balancer_quota=0) |
|
quota = {'load_balancer': 1} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_load_balancer) |
|
# Test above project is now at quota |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_load_balancer) |
|
|
|
# Test pre-existing quota with quota of unlimited |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_load_balancer_quota=0) |
|
quota = {'load_balancer': constants.QUOTA_UNLIMITED} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_load_balancer) |
|
# Test above project adding another load balancer |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.LoadBalancer, |
|
project_id)) |
|
self.assertEqual(2, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_load_balancer) |
|
|
|
# ### Test listener quota |
|
# Test with no pre-existing quota record default 0 |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_listener_quota=0) |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
self.assertIsNone(self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_listener) |
|
|
|
# Test with no pre-existing quota record default 1 |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_listener_quota=1) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_listener) |
|
# Test above project is now at quota |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_listener) |
|
|
|
# Test with no pre-existing quota record default unlimited |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', |
|
default_listener_quota=constants.QUOTA_UNLIMITED) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_listener) |
|
# Test above project adding another listener |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
self.assertEqual(2, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_listener) |
|
|
|
# Test upgrade case with pre-quota listener |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_listener_quota=1) |
|
lb = self.repos.load_balancer.create( |
|
self.session, id=uuidutils.generate_uuid(), |
|
project_id=project_id, name="lb_name", |
|
description="lb_description", |
|
provisioning_status=constants.ACTIVE, |
|
operating_status=constants.ONLINE, |
|
enabled=True) |
|
self.repos.listener.create( |
|
self.session, protocol=constants.PROTOCOL_HTTP, protocol_port=80, |
|
enabled=True, provisioning_status=constants.ACTIVE, |
|
operating_status=constants.ONLINE, project_id=project_id, |
|
load_balancer_id=lb.id) |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
|
|
# Test upgrade case with pre-quota deleted listener |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_listener_quota=1) |
|
lb = self.repos.load_balancer.create( |
|
self.session, id=uuidutils.generate_uuid(), |
|
project_id=project_id, name="lb_name", |
|
description="lb_description", |
|
provisioning_status=constants.ACTIVE, |
|
operating_status=constants.ONLINE, |
|
enabled=True) |
|
self.repos.listener.create( |
|
self.session, protocol=constants.PROTOCOL_HTTP, protocol_port=80, |
|
enabled=True, provisioning_status=constants.DELETED, |
|
operating_status=constants.ONLINE, project_id=project_id, |
|
load_balancer_id=lb.id) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_listener) |
|
|
|
# Test pre-existing quota with quota of zero |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_listener_quota=10) |
|
quota = {'listener': 0} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
|
|
# Test pre-existing quota with quota of one |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_listener_quota=0) |
|
quota = {'listener': 1} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_listener) |
|
# Test above project is now at quota |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_listener) |
|
|
|
# Test pre-existing quota with quota of unlimited |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_listener_quota=0) |
|
quota = {'listener': constants.QUOTA_UNLIMITED} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_listener) |
|
# Test above project adding another listener |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Listener, |
|
project_id)) |
|
self.assertEqual(2, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_listener) |
|
|
|
# ### Test pool quota |
|
# Test with no pre-existing quota record default 0 |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_pool_quota=0) |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
self.assertIsNone(self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_pool) |
|
|
|
# Test with no pre-existing quota record default 1 |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_pool_quota=1) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_pool) |
|
# Test above project is now at quota |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_pool) |
|
|
|
# Test with no pre-existing quota record default unlimited |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', |
|
default_pool_quota=constants.QUOTA_UNLIMITED) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_pool) |
|
# Test above project adding another pool |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
self.assertEqual(2, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_pool) |
|
|
|
# Test upgrade case with pre-quota pool |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_pool_quota=1) |
|
lb = self.repos.load_balancer.create( |
|
self.session, id=uuidutils.generate_uuid(), |
|
project_id=project_id, name="lb_name", |
|
description="lb_description", |
|
provisioning_status=constants.ACTIVE, |
|
operating_status=constants.ONLINE, |
|
enabled=True) |
|
pool = self.repos.pool.create( |
|
self.session, id=uuidutils.generate_uuid(), |
|
project_id=project_id, name="pool1", |
|
protocol=constants.PROTOCOL_HTTP, |
|
lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, |
|
provisioning_status=constants.ACTIVE, |
|
operating_status=constants.ONLINE, |
|
enabled=True, load_balancer_id=lb.id) |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
|
|
# Test upgrade case with pre-quota deleted pool |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_pool_quota=1) |
|
lb = self.repos.load_balancer.create( |
|
self.session, id=uuidutils.generate_uuid(), |
|
project_id=project_id, name="lb_name", |
|
description="lb_description", |
|
provisioning_status=constants.ACTIVE, |
|
operating_status=constants.ONLINE, |
|
enabled=True) |
|
pool = self.repos.pool.create( |
|
self.session, id=uuidutils.generate_uuid(), |
|
project_id=project_id, name="pool1", |
|
protocol=constants.PROTOCOL_HTTP, |
|
lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, |
|
provisioning_status=constants.DELETED, |
|
operating_status=constants.ONLINE, |
|
enabled=True, load_balancer_id=lb.id) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_pool) |
|
|
|
# Test pre-existing quota with quota of zero |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_pool_quota=10) |
|
quota = {'pool': 0} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
|
|
# Test pre-existing quota with quota of one |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_pool_quota=0) |
|
quota = {'pool': 1} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_pool) |
|
# Test above project is now at quota |
|
self.assertTrue(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_pool) |
|
|
|
# Test pre-existing quota with quota of unlimited |
|
project_id = uuidutils.generate_uuid() |
|
conf.config(group='quotas', default_pool_quota=0) |
|
quota = {'pool': constants.QUOTA_UNLIMITED} |
|
self.repos.quotas.update(self.session, project_id, quota=quota) |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
self.assertEqual(1, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_pool) |
|
# Test above project adding another pool |
|
self.assertFalse(self.repos.check_quota_met(self.session, |
|
self.session, |
|
models.Pool, |
|
project_id)) |
|
self.assertEqual(2, self.repos.quotas.get( |
|
self.session, project_id=project_id).in_use_pool) |
|
|
|
# ### Test health monitor quota |
|
        # Test with no pre-existing quota record default 0
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_health_monitor_quota=0)
        self.assertTrue(self.repos.check_quota_met(self.session,
                                                   self.session,
                                                   models.HealthMonitor,
                                                   project_id))
        self.assertIsNone(self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)

        # Test with no pre-existing quota record default 1
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_health_monitor_quota=1)
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.HealthMonitor,
                                                    project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)
        # Test above project is now at quota
        self.assertTrue(self.repos.check_quota_met(self.session,
                                                   self.session,
                                                   models.HealthMonitor,
                                                   project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)

        # Test with no pre-existing quota record default unlimited
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas',
                    default_health_monitor_quota=constants.QUOTA_UNLIMITED)
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.HealthMonitor,
                                                    project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)
        # Test above project adding another health monitor
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.HealthMonitor,
                                                    project_id))
        self.assertEqual(2, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)

        # Test upgrade case with pre-quota health monitor
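        # A health monitor that predates quota tracking still counts, so
        # the check below reports the default quota of one as already met.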
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_health_monitor_quota=1)
        lb = self.repos.load_balancer.create(
            self.session, id=uuidutils.generate_uuid(),
            project_id=project_id, name="lb_name",
            description="lb_description",
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            enabled=True)
        pool = self.repos.pool.create(
            self.session, id=uuidutils.generate_uuid(),
            project_id=project_id, name="pool1",
            protocol=constants.PROTOCOL_HTTP,
            lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN,
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            enabled=True, load_balancer_id=lb.id)
        self.repos.health_monitor.create(
            self.session, project_id=project_id,
            name="health_mon1", type=constants.HEALTH_MONITOR_HTTP,
            delay=1, timeout=1, fall_threshold=1, rise_threshold=1,
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            enabled=True, pool_id=pool.id)
        self.assertTrue(self.repos.check_quota_met(self.session,
                                                   self.session,
                                                   models.HealthMonitor,
                                                   project_id))

        # Test upgrade case with pre-quota deleted health monitor
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_health_monitor_quota=1)
        lb = self.repos.load_balancer.create(
            self.session, id=uuidutils.generate_uuid(),
            project_id=project_id, name="lb_name",
            description="lb_description",
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            enabled=True)
        pool = self.repos.pool.create(
            self.session, id=uuidutils.generate_uuid(),
            project_id=project_id, name="pool1",
            protocol=constants.PROTOCOL_HTTP,
            lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN,
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            enabled=True, load_balancer_id=lb.id)
        self.repos.health_monitor.create(
            self.session, project_id=project_id,
            name="health_mon1", type=constants.HEALTH_MONITOR_HTTP,
            delay=1, timeout=1, fall_threshold=1, rise_threshold=1,
            provisioning_status=constants.DELETED,
            operating_status=constants.OFFLINE,
            enabled=True, pool_id=pool.id)
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.HealthMonitor,
                                                    project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)

        # Test pre-existing quota with quota of zero
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_health_monitor_quota=10)
        quota = {'health_monitor': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.assertTrue(self.repos.check_quota_met(self.session,
                                                   self.session,
                                                   models.HealthMonitor,
                                                   project_id))

        # Test pre-existing quota with quota of one
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_health_monitor_quota=0)
        quota = {'health_monitor': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.HealthMonitor,
                                                    project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)
        # Test above project is now at quota
        self.assertTrue(self.repos.check_quota_met(self.session,
                                                   self.session,
                                                   models.HealthMonitor,
                                                   project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)

        # Test pre-existing quota with quota of unlimited
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_health_monitor_quota=0)
        quota = {'health_monitor': constants.QUOTA_UNLIMITED}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.HealthMonitor,
                                                    project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)
        # Test above project adding another health monitor
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.HealthMonitor,
                                                    project_id))
        self.assertEqual(2, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)

        # ### Test member quota
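        # Members run through the same cases: default quotas of zero, one
        # and unlimited, pre-quota (and deleted) members, then explicit
        # per-project quotas.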
        # Test with no pre-existing quota record default 0
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_member_quota=0)
        self.assertTrue(self.repos.check_quota_met(self.session,
                                                   self.session,
                                                   models.Member,
                                                   project_id))
        self.assertIsNone(self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)

        # Test with no pre-existing quota record default 1
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_member_quota=1)
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.Member,
                                                    project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)
        # Test above project is now at quota
        self.assertTrue(self.repos.check_quota_met(self.session,
                                                   self.session,
                                                   models.Member,
                                                   project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)

        # Test with no pre-existing quota record default unlimited
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas',
                    default_member_quota=constants.QUOTA_UNLIMITED)
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.Member,
                                                    project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)
        # Test above project adding another member
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.Member,
                                                    project_id))
        self.assertEqual(2, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)

        # Test upgrade case with pre-quota member
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_member_quota=1)
        lb = self.repos.load_balancer.create(
            self.session, id=uuidutils.generate_uuid(),
            project_id=project_id, name="lb_name",
            description="lb_description",
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            enabled=True)
        pool = self.repos.pool.create(
            self.session, id=uuidutils.generate_uuid(),
            project_id=project_id, name="pool1",
            protocol=constants.PROTOCOL_HTTP,
            lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN,
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            enabled=True, load_balancer_id=lb.id)
        self.repos.member.create(
            self.session, project_id=project_id,
            ip_address='192.0.2.1', protocol_port=80,
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            enabled=True, pool_id=pool.id, backup=False)
        self.assertTrue(self.repos.check_quota_met(self.session,
                                                   self.session,
                                                   models.Member,
                                                   project_id))

        # Test upgrade case with pre-quota deleted member
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_member_quota=1)
        lb = self.repos.load_balancer.create(
            self.session, id=uuidutils.generate_uuid(),
            project_id=project_id, name="lb_name",
            description="lb_description",
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            enabled=True)
        pool = self.repos.pool.create(
            self.session, id=uuidutils.generate_uuid(),
            project_id=project_id, name="pool1",
            protocol=constants.PROTOCOL_HTTP,
            lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN,
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            enabled=True, load_balancer_id=lb.id)
        self.repos.member.create(
            self.session, project_id=project_id,
            ip_address='192.0.2.1', protocol_port=80,
            provisioning_status=constants.DELETED,
            operating_status=constants.ONLINE,
            enabled=True, pool_id=pool.id, backup=False)
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.Member,
                                                    project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)

        # Test pre-existing quota with quota of zero
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_member_quota=10)
        quota = {'member': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.assertTrue(self.repos.check_quota_met(self.session,
                                                   self.session,
                                                   models.Member,
                                                   project_id))

        # Test pre-existing quota with quota of one
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_member_quota=0)
        quota = {'member': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.Member,
                                                    project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)
        # Test above project is now at quota
        self.assertTrue(self.repos.check_quota_met(self.session,
                                                   self.session,
                                                   models.Member,
                                                   project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)

        # Test pre-existing quota with quota of unlimited
        project_id = uuidutils.generate_uuid()
        conf.config(group='quotas', default_member_quota=0)
        quota = {'member': constants.QUOTA_UNLIMITED}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.Member,
                                                    project_id))
        self.assertEqual(1, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)
        # Test above project adding another member
        self.assertFalse(self.repos.check_quota_met(self.session,
                                                    self.session,
                                                    models.Member,
                                                    project_id))
        self.assertEqual(2, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)

    def test_decrement_quota(self):
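        # decrement_quota() lowers the in_use counter for the given
        # resource class and project; it must never push the counter below
        # zero and must not create a quota record that does not exist.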
        # Test decrement on non-existent quota with noauth
        project_id = uuidutils.generate_uuid()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        self.repos.decrement_quota(self.session,
                                   models.LoadBalancer,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.count(self.session,
                                                    project_id=project_id))
        conf.config(group='api_settings', auth_strategy=constants.TESTING)

        # Test decrement on non-existent quota
        project_id = uuidutils.generate_uuid()
        self.repos.decrement_quota(self.session,
                                   models.LoadBalancer,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.count(self.session,
                                                    project_id=project_id))

        # Test DB deadlock case
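        # A DBDeadlock raised while accessing the quota row is surfaced to
        # the caller as ProjectBusyException.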
        project_id = uuidutils.generate_uuid()
        mock_session = mock.MagicMock()
        mock_session.query = mock.MagicMock(
            side_effect=db_exception.DBDeadlock)
        self.assertRaises(exceptions.ProjectBusyException,
                          self.repos.decrement_quota,
                          mock_session,
                          models.LoadBalancer, project_id)

        # ### Test load balancer quota
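        # Each resource type below covers the same four decrement cases:
        # in_use of zero, zero with noauth, one, and one with noauth; the
        # counter always ends at zero.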
        # Test decrement on zero in use quota
        project_id = uuidutils.generate_uuid()
        quota = {'in_use_load_balancer': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.LoadBalancer,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_load_balancer)

        # Test decrement on zero in use quota with noauth
        project_id = uuidutils.generate_uuid()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        quota = {'in_use_load_balancer': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.LoadBalancer,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_load_balancer)
        conf.config(group='api_settings', auth_strategy=constants.TESTING)

        # Test decrement on in use quota
        project_id = uuidutils.generate_uuid()
        quota = {'in_use_load_balancer': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.LoadBalancer,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_load_balancer)

        # Test decrement on in use quota with noauth
        project_id = uuidutils.generate_uuid()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        quota = {'in_use_load_balancer': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.LoadBalancer,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_load_balancer)
        conf.config(group='api_settings', auth_strategy=constants.TESTING)

        # ### Test listener quota
        # Test decrement on zero in use quota
        project_id = uuidutils.generate_uuid()
        quota = {'in_use_listener': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Listener,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_listener)

        # Test decrement on zero in use quota with noauth
        project_id = uuidutils.generate_uuid()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        quota = {'in_use_listener': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Listener,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_listener)
        conf.config(group='api_settings', auth_strategy=constants.TESTING)

        # Test decrement on in use quota
        project_id = uuidutils.generate_uuid()
        quota = {'in_use_listener': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Listener,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_listener)

        # Test decrement on in use quota with noauth
        project_id = uuidutils.generate_uuid()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        quota = {'in_use_listener': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Listener,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_listener)
        conf.config(group='api_settings', auth_strategy=constants.TESTING)

        # ### Test pool quota
        # Test decrement on zero in use quota
        project_id = uuidutils.generate_uuid()
        quota = {'in_use_pool': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Pool,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_pool)

        # Test decrement on zero in use quota with noauth
        project_id = uuidutils.generate_uuid()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        quota = {'in_use_pool': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Pool,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_pool)
        conf.config(group='api_settings', auth_strategy=constants.TESTING)

        # Test decrement on in use quota
        project_id = uuidutils.generate_uuid()
        quota = {'in_use_pool': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Pool,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_pool)

        # Test decrement on in use quota with noauth
        project_id = uuidutils.generate_uuid()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        quota = {'in_use_pool': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Pool,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_pool)
        conf.config(group='api_settings', auth_strategy=constants.TESTING)

        # ### Test health monitor quota
        # Test decrement on zero in use quota
        project_id = uuidutils.generate_uuid()
        quota = {'in_use_health_monitor': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.HealthMonitor,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)

        # Test decrement on zero in use quota with noauth
        project_id = uuidutils.generate_uuid()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        quota = {'in_use_health_monitor': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.HealthMonitor,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)
        conf.config(group='api_settings', auth_strategy=constants.TESTING)

        # Test decrement on in use quota
        project_id = uuidutils.generate_uuid()
        quota = {'in_use_health_monitor': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.HealthMonitor,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)

        # Test decrement on in use quota with noauth
        project_id = uuidutils.generate_uuid()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        quota = {'in_use_health_monitor': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.HealthMonitor,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_health_monitor)
        conf.config(group='api_settings', auth_strategy=constants.TESTING)

        # ### Test member quota
        # Test decrement on zero in use quota
        project_id = uuidutils.generate_uuid()
        quota = {'in_use_member': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Member,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)

        # Test decrement on zero in use quota with noauth
        project_id = uuidutils.generate_uuid()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        quota = {'in_use_member': 0}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Member,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)
        conf.config(group='api_settings', auth_strategy=constants.TESTING)

        # Test decrement on in use quota
        project_id = uuidutils.generate_uuid()
        quota = {'in_use_member': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Member,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)

        # Test decrement on in use quota with noauth
        project_id = uuidutils.generate_uuid()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        quota = {'in_use_member': 1}
        self.repos.quotas.update(self.session, project_id, quota=quota)
        self.repos.decrement_quota(self.session,
                                   models.Member,
                                   project_id)
        self.assertEqual(0, self.repos.quotas.get(
            self.session, project_id=project_id).in_use_member)
        conf.config(group='api_settings', auth_strategy=constants.TESTING)

    def test_get_amphora_stats(self):
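        # Seed stats for two listeners on the same amphora so the
        # amphora-level lookup below has more than one row to return.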
        listener2_id = uuidutils.generate_uuid()
        self.repos.listener_stats.create(
            self.session, listener_id=self.listener.id,
            amphora_id=self.amphora.id, bytes_in=1, bytes_out=2,
            active_connections=3, total_connections=4, request_errors=5)
        self.repos.listener_stats.create(
            self.session, listener_id=listener2_id,
            amphora_id=self.amphora.id, bytes_in=6, bytes_out=7,
            active_connections=8, total_connections=9, request_errors=10)
        amp_stats = self.repos.get_amphora_stats(self.session, self.amphora.id)