Add quota support to Octavia
Octavia has no quota definitions, but needs them for parity with
Neutron LBaaS. This change provides an endpoint with support for
retrieving, updating, and deleting per-project quotas, and adds
enforcement of those quotas. It also adds a scenario test that
validates quotas in a load balancer graph.

Co-Authored-By: Michael Johnson <johnsomor@gmail.com>
Co-Authored-By: Phillip Toohill <phillip.toohill@rackspace.com>
Co-Authored-By: Adam Harwell <flux.adam@gmail.com>
Change-Id: Ia1d85dcd931a57a2fa3f6276d3fe6dabfeadd15e
Closes-Bug: #1596652
commit 7d933da31e (parent 1aedd8a0f5). One file's diff is suppressed below because it is too large.
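Functionally, the new v1 quota endpoints behave as sketched below; the host, port, project ID, and quota values are illustrative, not part of this change:

    import requests

    BASE = 'http://localhost:9876/v1/quotas'        # illustrative API root
    PROJECT = 'e3cd678b11784734bc366148aa37580e'    # hypothetical project ID

    # Read a project's effective quotas (unset values fall back to the
    # configured defaults).
    print(requests.get('%s/%s' % (BASE, PROJECT)).json())

    # Raise the listener quota for the project; -1 means unlimited.
    requests.put('%s/%s' % (BASE, PROJECT),
                 json={'quota': {'listener': 100}})

    # Reset the project back to the configured defaults.
    requests.delete('%s/%s' % (BASE, PROJECT))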
@@ -52,7 +52,8 @@ APIs
   :glob:
   :maxdepth: 1

-  api/*
+  api/octaviaapi.rst
+  api/haproxy-amphora-api.rst

====================
Design Documentation
@@ -308,3 +308,10 @@
# CA certificates file to verify neutron connections when TLS is enabled
# insecure = False
# ca_certificates_file =
+
+[quotas]
+# default_load_balancer_quota = -1
+# default_listener_quota = -1
+# default_member_quota = -1
+# default_pool_quota = -1
+# default_health_monitor_quota = -1
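The -1 defaults map to the new QUOTA_UNLIMITED constant; a deployer caps resources per project by overriding them, for example (values here are illustrative):

    [quotas]
    default_load_balancer_quota = 10
    default_listener_quota = 20
    default_member_quota = 50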
@@ -17,11 +17,13 @@ from wsmeext import pecan as wsme_pecan

from octavia.api.v1.controllers import base
from octavia.api.v1.controllers import load_balancer
+from octavia.api.v1.controllers import quotas


class V1Controller(base.BaseController):

    loadbalancers = load_balancer.LoadBalancersController()
+    quotas = quotas.QuotasController()

    @wsme_pecan.wsexpose(wtypes.text)
    def get(self):
@@ -96,3 +96,42 @@ class BaseController(rest.RestController):
        """Get a L7 Rule from the database."""
        return self._get_db_obj(session, self.repositories.l7rule,
                                data_models.L7Rule, id)
+
+    def _get_default_quotas(self, project_id):
+        """Gets the project's default quotas."""
+        quotas = data_models.Quotas(
+            project_id=project_id,
+            load_balancer=CONF.quotas.default_load_balancer_quota,
+            listener=CONF.quotas.default_listener_quota,
+            pool=CONF.quotas.default_pool_quota,
+            health_monitor=CONF.quotas.default_health_monitor_quota,
+            member=CONF.quotas.default_member_quota)
+        return quotas
+
+    def _get_db_quotas(self, session, project_id):
+        """Gets the project's quotas from the database, or responds with the
+
+        default quotas.
+        """
+        # At this point project_id should not ever be None or Unset
+        db_quotas = self.repositories.quotas.get(
+            session, project_id=project_id)
+        if not db_quotas:
+            LOG.debug("No custom quotas for project %s. Returning "
+                      "defaults...", project_id)
+            db_quotas = self._get_default_quotas(project_id=project_id)
+        else:
+            # Fill in any that are using the configured defaults
+            if db_quotas.load_balancer is None:
+                db_quotas.load_balancer = (CONF.quotas.
+                                           default_load_balancer_quota)
+            if db_quotas.listener is None:
+                db_quotas.listener = CONF.quotas.default_listener_quota
+            if db_quotas.pool is None:
+                db_quotas.pool = CONF.quotas.default_pool_quota
+            if db_quotas.health_monitor is None:
+                db_quotas.health_monitor = (CONF.quotas.
+                                            default_health_monitor_quota)
+            if db_quotas.member is None:
+                db_quotas.member = CONF.quotas.default_member_quota
+        return db_quotas
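In effect, a stored project row overrides only the fields it sets. A quick sketch, assuming the shipped defaults of -1:

    # Stored row:      Quotas(project_id=p, load_balancer=5)   (rest NULL)
    # _get_db_quotas:  load_balancer=5           (from the project row)
    #                  listener=-1, member=-1,   (from CONF.quotas defaults)
    #                  pool=-1, health_monitor=-1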
@@ -25,6 +25,7 @@ from octavia.api.v1.types import health_monitor as hm_types
from octavia.common import constants
from octavia.common import data_models
from octavia.common import exceptions
+from octavia.db import api as db_api
from octavia.db import prepare as db_prepare
from octavia.i18n import _LI

@@ -94,6 +95,7 @@ class HealthMonitorController(base.BaseController):
    def post(self, health_monitor):
        """Creates a health monitor on a pool."""
        context = pecan.request.context.get('octavia_context')
+
        try:
            db_hm = self.repositories.health_monitor.get(
                context.session, pool_id=self.pool_id)

@@ -101,26 +103,33 @@ class HealthMonitorController(base.BaseController):
                raise exceptions.DuplicateHealthMonitor()
        except exceptions.NotFound:
            pass
-        hm_dict = db_prepare.create_health_monitor(
-            health_monitor.to_dict(render_unsets=True), self.pool_id)
-        self._test_lb_and_listener_statuses(context.session)

+        lock_session = db_api.get_session(autocommit=False)
+        if self.repositories.check_quota_met(
+                context.session,
+                lock_session,
+                data_models.HealthMonitor,
+                health_monitor.project_id):
+            lock_session.rollback()
+            raise exceptions.QuotaException
+
        try:
-            db_hm = self.repositories.health_monitor.create(context.session,
+            hm_dict = db_prepare.create_health_monitor(
+                health_monitor.to_dict(render_unsets=True), self.pool_id)
+            self._test_lb_and_listener_statuses(lock_session)
+
+            db_hm = self.repositories.health_monitor.create(lock_session,
                                                            **hm_dict)
+            db_new_hm = self._get_db_hm(lock_session)
+            lock_session.commit()
        except odb_exceptions.DBError:
            # Setting LB and Listener back to active because this is just a
            # validation failure
            self.repositories.load_balancer.update(
                context.session, self.load_balancer_id,
                provisioning_status=constants.ACTIVE)
            for listener_id in self._get_affected_listener_ids(
                    context.session):
                self.repositories.listener.update(
                    context.session, listener_id,
                    provisioning_status=constants.ACTIVE)
+            lock_session.rollback()
            raise exceptions.InvalidOption(value=hm_dict.get('type'),
                                           option='type')
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                lock_session.rollback()

        try:
            LOG.info(_LI("Sending Creation of Health Monitor for Pool %s to "
                         "handler"), self.pool_id)

@@ -132,8 +141,8 @@ class HealthMonitorController(base.BaseController):
                self.repositories.listener.update(
                    context.session, listener_id,
                    operating_status=constants.ERROR)
-        db_hm = self._get_db_hm(context.session)
-        return self._convert_db_to_type(db_hm, hm_types.HealthMonitorResponse)
+        return self._convert_db_to_type(db_new_hm,
+                                        hm_types.HealthMonitorResponse)

    @wsme_pecan.wsexpose(hm_types.HealthMonitorResponse,
                         body=hm_types.HealthMonitorPUT, status_code=202)
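Distilled, the create path in each v1 controller now follows the same quota pattern (a sketch; the data model class and resource creation call vary per controller):

    lock_session = db_api.get_session(autocommit=False)
    if self.repositories.check_quota_met(
            context.session, lock_session,
            data_models.HealthMonitor, project_id):
        lock_session.rollback()
        raise exceptions.QuotaException

    try:
        # ... create the resource rows on lock_session ...
        lock_session.commit()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()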
@@ -29,6 +29,7 @@ from octavia.api.v1.types import listener as listener_types
from octavia.common import constants
from octavia.common import data_models
from octavia.common import exceptions
+from octavia.db import api as db_api
from octavia.db import prepare as db_prepare
from octavia.i18n import _LI

@@ -105,12 +106,11 @@ class ListenersController(base.BaseController):
            raise exceptions.NotFound(
                resource=data_models.Pool._name(), id=pool_id)

-    def _validate_listener(self, session, listener_dict):
+    def _validate_listener(self, lock_session, listener_dict):
        """Validate listener for wrong protocol or duplicate listeners

        Update the load balancer db when provisioning status changes.
        """
-        lb_repo = self.repositories.load_balancer
        if (listener_dict
                and listener_dict.get('insert_headers')
                and list(set(listener_dict['insert_headers'].keys()) -

@@ -122,32 +122,27 @@ class ListenersController(base.BaseController):
        try:
            sni_containers = listener_dict.pop('sni_containers', [])
            db_listener = self.repositories.listener.create(
-                session, **listener_dict)
+                lock_session, **listener_dict)
            if sni_containers:
                for container in sni_containers:
                    sni_dict = {'listener_id': db_listener.id,
                                'tls_container_id': container.get(
                                    'tls_container_id')}
-                    self.repositories.sni.create(session, **sni_dict)
-            db_listener = self.repositories.listener.get(session,
+                    self.repositories.sni.create(lock_session, **sni_dict)
+            db_listener = self.repositories.listener.get(lock_session,
                                                          id=db_listener.id)
            return db_listener
        except odb_exceptions.DBDuplicateEntry as de:
-            # Setting LB back to active because this is just a validation
-            # failure
-            lb_repo.update(session, self.load_balancer_id,
-                           provisioning_status=constants.ACTIVE)
            if ['id'] == de.columns:
                raise exceptions.IDAlreadyExists()
            elif set(['load_balancer_id', 'protocol_port']) == set(de.columns):
                raise exceptions.DuplicateListenerEntry(
                    port=listener_dict.get('protocol_port'))
        except odb_exceptions.DBError:
-            # Setting LB back to active because this is just a validation
-            # failure
-            lb_repo.update(session, self.load_balancer_id,
-                           provisioning_status=constants.ACTIVE)
            raise exceptions.InvalidOption(value=listener_dict.get('protocol'),
                                           option='protocol')

    def _send_listener_to_handler(self, session, db_listener):
        try:
            LOG.info(_LI("Sending Creation of Listener %s to handler"),
                     db_listener.id)

@@ -165,21 +160,38 @@ class ListenersController(base.BaseController):
                         body=listener_types.ListenerPOST, status_code=202)
    def post(self, listener):
        """Creates a listener on a load balancer."""
-        self._secure_data(listener)
        context = pecan.request.context.get('octavia_context')

+        lock_session = db_api.get_session(autocommit=False)
+        if self.repositories.check_quota_met(
+                context.session,
+                lock_session,
+                data_models.Listener,
+                listener.project_id):
+            lock_session.rollback()
+            raise exceptions.QuotaException
+
+        try:
+            self._secure_data(listener)
            listener_dict = db_prepare.create_listener(
                listener.to_dict(render_unsets=True), self.load_balancer_id)
            if listener_dict['default_pool_id']:
-                self._validate_pool(context.session,
+                self._validate_pool(lock_session,
                                    listener_dict['default_pool_id'])
-            self._test_lb_and_listener_statuses(context.session)
-            # NOTE(blogan): Throwing away because we should not store secure data
-            # in the database nor should we send it to a handler.
+            self._test_lb_and_listener_statuses(lock_session)
+            # NOTE(blogan): Throwing away because we should not store
+            # secure data in the database nor should we send it to a handler.
            if 'tls_termination' in listener_dict:
                del listener_dict['tls_termination']
-            # This is the extra validation layer for wrong protocol or duplicate
-            # listeners on the same load balancer.
-            return self._validate_listener(context.session, listener_dict)
+            # This is the extra validation layer for wrong protocol or
+            # duplicate listeners on the same load balancer.
+            db_listener = self._validate_listener(lock_session, listener_dict)
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                lock_session.rollback()

+        return self._send_listener_to_handler(context.session, db_listener)

    @wsme_pecan.wsexpose(listener_types.ListenerResponse, wtypes.text,
                         body=listener_types.ListenerPUT, status_code=202)
@@ -29,6 +29,7 @@ from octavia.common import constants
from octavia.common import data_models
from octavia.common import exceptions
import octavia.common.validate as validate
+from octavia.db import api as db_api
from octavia.db import prepare as db_prepare
from octavia.i18n import _LI

@@ -74,14 +75,17 @@ class LoadBalancersController(base.BaseController):
            raise exceptions.ImmutableObject(resource=db_lb._name(),
                                             id=id)

-    def _create_load_balancer_graph(self, context, load_balancer):
+    def _create_load_balancer_graph_db(self, lock_session, load_balancer):
        prepped_lb = db_prepare.create_load_balancer_tree(
            load_balancer.to_dict(render_unsets=True))
        try:
            db_lb = self.repositories.create_load_balancer_tree(
-                context.session, prepped_lb)
+                lock_session, prepped_lb)
        except Exception:
            raise
        return db_lb

+    def _load_balancer_graph_to_handler(self, context, db_lb):
        try:
            LOG.info(_LI("Sending full load balancer configuration %s to "
                         "the handler"), db_lb.id)

@@ -98,24 +102,49 @@ class LoadBalancersController(base.BaseController):
                     body=lb_types.LoadBalancerPOST, status_code=202)
    def post(self, load_balancer):
        """Creates a load balancer."""
-        context = pecan.request.context.get('octavia_context')
        # Validate the subnet id
        if load_balancer.vip.subnet_id:
            if not validate.subnet_exists(load_balancer.vip.subnet_id):
                raise exceptions.NotFound(resource='Subnet',
                                          id=load_balancer.vip.subnet_id)

+        context = pecan.request.context.get('octavia_context')
+        lock_session = db_api.get_session(autocommit=False)
+        if self.repositories.check_quota_met(
+                context.session,
+                lock_session,
+                data_models.LoadBalancer,
+                load_balancer.project_id):
+            lock_session.rollback()
+            raise exceptions.QuotaException

        if load_balancer.listeners:
-            return self._create_load_balancer_graph(context, load_balancer)
+            try:
+                db_lb = self._create_load_balancer_graph_db(lock_session,
+                                                            load_balancer)
+                lock_session.commit()
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    lock_session.rollback()

+            return self._load_balancer_graph_to_handler(context, db_lb)

+        try:
            lb_dict = db_prepare.create_load_balancer(load_balancer.to_dict(
                render_unsets=True
            ))
            vip_dict = lb_dict.pop('vip', {})
-        try:

            db_lb = self.repositories.create_load_balancer_and_vip(
-                context.session, lb_dict, vip_dict)
+                lock_session, lb_dict, vip_dict)
+            lock_session.commit()
        except odb_exceptions.DBDuplicateEntry:
+            lock_session.rollback()
            raise exceptions.IDAlreadyExists()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                lock_session.rollback()

        # Handler will be responsible for sending to controller
        try:
            LOG.info(_LI("Sending created Load Balancer %s to the handler"),
@@ -24,8 +24,10 @@ from wsmeext import pecan as wsme_pecan
from octavia.api.v1.controllers import base
from octavia.api.v1.types import member as member_types
from octavia.common import constants
+from octavia.common import data_models
from octavia.common import exceptions
import octavia.common.validate as validate
+from octavia.db import api as db_api
from octavia.db import prepare as db_prepare
from octavia.i18n import _LI

@@ -96,31 +98,37 @@ class MembersController(base.BaseController):
        if member.subnet_id and not validate.subnet_exists(member.subnet_id):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)
-        member_dict = db_prepare.create_member(member.to_dict(
-            render_unsets=True), self.pool_id)
-        self._test_lb_and_listener_statuses(context.session)

+        lock_session = db_api.get_session(autocommit=False)
+        if self.repositories.check_quota_met(
+                context.session,
+                lock_session,
+                data_models.Member,
+                member.project_id):
+            lock_session.rollback()
+            raise exceptions.QuotaException
+
        try:
-            db_member = self.repositories.member.create(context.session,
+            member_dict = db_prepare.create_member(member.to_dict(
+                render_unsets=True), self.pool_id)
+            self._test_lb_and_listener_statuses(lock_session)
+
+            db_member = self.repositories.member.create(lock_session,
                                                        **member_dict)
+            db_new_member = self._get_db_member(lock_session, db_member.id)
+            lock_session.commit()
        except oslo_exc.DBDuplicateEntry as de:
            # Setting LB and Listener back to active because this is just a
            # validation failure
            self.repositories.load_balancer.update(
                context.session, self.load_balancer_id,
                provisioning_status=constants.ACTIVE)
            for listener_id in self._get_affected_listener_ids(
                    context.session):
                self.repositories.listener.update(
                    context.session, listener_id,
                    provisioning_status=constants.ACTIVE)
+            lock_session.rollback()
            if ['id'] == de.columns:
                raise exceptions.IDAlreadyExists()
            elif (set(['pool_id', 'ip_address', 'protocol_port']) ==
                    set(de.columns)):
                raise exceptions.DuplicateMemberEntry(
                    ip_address=member_dict.get('ip_address'),
                    port=member_dict.get('protocol_port'))
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                lock_session.rollback()

        try:
            LOG.info(_LI("Sending Creation of Member %s to handler"),
                     db_member.id)

@@ -132,8 +140,8 @@ class MembersController(base.BaseController):
                self.repositories.listener.update(
                    context.session, listener_id,
                    operating_status=constants.ERROR)
-        db_member = self._get_db_member(context.session, db_member.id)
-        return self._convert_db_to_type(db_member, member_types.MemberResponse)
+        return self._convert_db_to_type(db_new_member,
+                                        member_types.MemberResponse)

    @wsme_pecan.wsexpose(member_types.MemberResponse,
                         wtypes.text, body=member_types.MemberPUT,
@@ -28,6 +28,7 @@ from octavia.api.v1.types import pool as pool_types
from octavia.common import constants
from octavia.common import data_models
from octavia.common import exceptions
+from octavia.db import api as db_api
from octavia.db import prepare as db_prepare
from octavia.i18n import _LI

@@ -88,31 +89,25 @@ class PoolsController(base.BaseController):
            raise exceptions.ImmutableObject(resource=db_lb._name(),
                                             id=self.load_balancer_id)

-    def _validate_create_pool(self, session, pool_dict):
+    def _validate_create_pool(self, lock_session, pool_dict):
        """Validate creating pool on load balancer.

        Update database for load balancer and (optional) listener based on
        provisioning status.
        """
        try:
-            db_pool = self.repositories.create_pool_on_load_balancer(
-                session, pool_dict, listener_id=self.listener_id)
+            return self.repositories.create_pool_on_load_balancer(
+                lock_session, pool_dict, listener_id=self.listener_id)
        except odb_exceptions.DBDuplicateEntry as de:
            if ['id'] == de.columns:
                raise exceptions.IDAlreadyExists()
        except odb_exceptions.DBError:
-            # Setting LB and Listener back to active because this is just a
-            # validation failure
-            for listener_id in self._get_affected_listener_ids(session):
-                self.repositories.listener.update(
-                    session, listener_id, provisioning_status=constants.ACTIVE)
-            self.repositories.load_balancer.update(
-                session, self.load_balancer_id,
-                provisioning_status=constants.ACTIVE)
            # TODO(blogan): will have to do separate validation protocol
            # before creation or update since the exception messages
            # do not give any information as to what constraint failed
            raise exceptions.InvalidOption(value='', option='')

    def _send_pool_to_handler(self, session, db_pool):
        try:
            LOG.info(_LI("Sending Creation of Pool %s to handler"),
                     db_pool.id)

@@ -138,21 +133,40 @@ class PoolsController(base.BaseController):
        # For some API requests the listener_id will be passed in the
        # pool_dict:
        context = pecan.request.context.get('octavia_context')
-        pool_dict = db_prepare.create_pool(pool.to_dict(render_unsets=True))

+        lock_session = db_api.get_session(autocommit=False)
+        if self.repositories.check_quota_met(
+                context.session,
+                lock_session,
+                data_models.Pool,
+                pool.project_id):
+            lock_session.rollback()
+            raise exceptions.QuotaException
+
+        try:
+            pool_dict = db_prepare.create_pool(
+                pool.to_dict(render_unsets=True))
            if 'listener_id' in pool_dict:
                if pool_dict['listener_id'] is not None:
                    self.listener_id = pool_dict.pop('listener_id')
                else:
                    del pool_dict['listener_id']
-        if self.listener_id and self.repositories.listener.has_default_pool(
-                context.session, self.listener_id):
+            listener_repo = self.repositories.listener
+            if self.listener_id and listener_repo.has_default_pool(
+                    lock_session, self.listener_id):
                raise exceptions.DuplicatePoolEntry()
-        self._test_lb_and_listener_statuses(context.session)
+            self._test_lb_and_listener_statuses(lock_session)

            pool_dict['operating_status'] = constants.OFFLINE
            pool_dict['load_balancer_id'] = self.load_balancer_id

-        return self._validate_create_pool(context.session, pool_dict)
+            db_pool = self._validate_create_pool(lock_session, pool_dict)
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                lock_session.rollback()

+        return self._send_pool_to_handler(context.session, db_pool)

    @wsme_pecan.wsexpose(pool_types.PoolResponse, wtypes.text,
                         body=pool_types.PoolPUT, status_code=202)
octavia/api/v1/controllers/quotas.py (new file, 89 lines)
@@ -0,0 +1,89 @@
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from oslo_config import cfg
import pecan
from wsme import types as wtypes
from wsmeext import pecan as wsme_pecan

from octavia.api.v1.controllers import base
from octavia.api.v1.types import quotas as quota_types

CONF = cfg.CONF
CONF.import_group('quotas', 'octavia.common.config')
LOG = logging.getLogger(__name__)


class QuotasController(base.BaseController):

    def __init__(self):
        super(QuotasController, self).__init__()

    @wsme_pecan.wsexpose(quota_types.QuotaResponse, wtypes.text)
    def get(self, project_id):
        """Get a single project's quota details."""
        context = pecan.request.context.get('octavia_context')
        db_quotas = self._get_db_quotas(context.session, project_id)
        return self._convert_db_to_type(db_quotas, quota_types.QuotaResponse)

    @wsme_pecan.wsexpose(quota_types.QuotaAllResponse)
    def get_all(self):
        """List all non-default quotas."""
        context = pecan.request.context.get('octavia_context')
        db_quotas = self.repositories.quotas.get_all(context.session)
        quotas = quota_types.QuotaAllResponse.from_data_model(db_quotas)
        return quotas

    @wsme_pecan.wsexpose(quota_types.QuotaResponse, wtypes.text,
                         body=quota_types.QuotaPUT, status_code=202)
    def put(self, project_id, quotas):
        """Update any or all quotas for a project."""
        context = pecan.request.context.get('octavia_context')
        quotas_dict = quotas.to_dict()
        self.repositories.quotas.update(context.session, project_id,
                                        **quotas_dict)
        db_quotas = self._get_db_quotas(context.session, project_id)
        return self._convert_db_to_type(db_quotas, quota_types.QuotaResponse)

    @wsme_pecan.wsexpose(None, wtypes.text, status_code=202)
    def delete(self, project_id):
        """Reset a project's quotas to the default values."""
        context = pecan.request.context.get('octavia_context')
        project_id = context.project_id or project_id
        self.repositories.quotas.delete(context.session, project_id)
        db_quotas = self._get_db_quotas(context.session, project_id)
        return self._convert_db_to_type(db_quotas, quota_types.QuotaResponse)

    @pecan.expose()
    def _lookup(self, project_id, *remainder):
        """Overridden pecan _lookup method for routing default endpoint."""
        if project_id and len(remainder) and remainder[0] == 'default':
            return QuotasDefaultController(project_id), ''


class QuotasDefaultController(base.BaseController):

    def __init__(self, project_id):
        super(QuotasDefaultController, self).__init__()
        self.project_id = project_id

    @wsme_pecan.wsexpose(quota_types.QuotaResponse, wtypes.text)
    def get(self):
        """Get a project's default quota details."""
        context = pecan.request.context.get('octavia_context')
        project_id = context.project_id
        quotas = self._get_default_quotas(project_id)
        return self._convert_db_to_type(quotas, quota_types.QuotaResponse)
octavia/api/v1/types/quotas.py (new file, 73 lines)
@@ -0,0 +1,73 @@
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import types as wtypes

from octavia.api.v1.types import base


class QuotaBase(base.BaseType):
    """Individual quota definitions."""
    load_balancer = wtypes.wsattr(wtypes.IntegerType())
    listener = wtypes.wsattr(wtypes.IntegerType())
    member = wtypes.wsattr(wtypes.IntegerType())
    pool = wtypes.wsattr(wtypes.IntegerType())
    health_monitor = wtypes.wsattr(wtypes.IntegerType())


class QuotaResponse(base.BaseType):
    """Wrapper object for quotas responses."""
    quota = wtypes.wsattr(QuotaBase)

    @classmethod
    def from_data_model(cls, data_model, children=False):
        quotas = super(QuotaResponse, cls).from_data_model(
            data_model, children=children)
        quotas.quota = QuotaBase.from_data_model(data_model)
        return quotas


class QuotaAllBase(base.BaseType):
    """Wrapper object for get all quotas responses."""
    project_id = wtypes.wsattr(wtypes.StringType())
    tenant_id = wtypes.wsattr(wtypes.StringType())
    load_balancer = wtypes.wsattr(wtypes.IntegerType())
    listener = wtypes.wsattr(wtypes.IntegerType())
    member = wtypes.wsattr(wtypes.IntegerType())
    pool = wtypes.wsattr(wtypes.IntegerType())
    health_monitor = wtypes.wsattr(wtypes.IntegerType())

    @classmethod
    def from_data_model(cls, data_model, children=False):
        quotas = super(QuotaAllBase, cls).from_data_model(
            data_model, children=children)
        quotas.tenant_id = quotas.project_id
        return quotas


class QuotaAllResponse(base.BaseType):
    quotas = wtypes.wsattr([QuotaAllBase])

    @classmethod
    def from_data_model(cls, data_model, children=False):
        quotalist = QuotaAllResponse()
        quotalist.quotas = [
            QuotaAllBase.from_data_model(obj)
            for obj in data_model]
        return quotalist


class QuotaPUT(base.BaseType):
    """Overall object for quota PUT request."""
    quota = wtypes.wsattr(QuotaBase)
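For orientation, responses built from these types are expected to look roughly like the following; the values are illustrative and the exact JSON rendering is WSME's:

    {"quota": {"load_balancer": 10, "listener": -1, "member": -1,
               "pool": -1, "health_monitor": -1}}

    {"quotas": [{"project_id": "abc123", "tenant_id": "abc123",
                 "load_balancer": 10, "listener": -1, "member": -1,
                 "pool": -1, "health_monitor": -1}]}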
@@ -435,6 +435,24 @@ glance_opts = [
               help=_('Disable certificate validation on SSL connections ')),
]

+quota_opts = [
+    cfg.IntOpt('default_load_balancer_quota',
+               default=constants.QUOTA_UNLIMITED,
+               help=_('Default per project load balancer quota.')),
+    cfg.IntOpt('default_listener_quota',
+               default=constants.QUOTA_UNLIMITED,
+               help=_('Default per project listener quota.')),
+    cfg.IntOpt('default_member_quota',
+               default=constants.QUOTA_UNLIMITED,
+               help=_('Default per project member quota.')),
+    cfg.IntOpt('default_pool_quota',
+               default=constants.QUOTA_UNLIMITED,
+               help=_('Default per project pool quota.')),
+    cfg.IntOpt('default_health_monitor_quota',
+               default=constants.QUOTA_UNLIMITED,
+               help=_('Default per project health monitor quota.')),
+]


# Register the configuration options
cfg.CONF.register_opts(core_opts)

@@ -454,6 +472,7 @@ cfg.CONF.register_cli_opts(healthmanager_opts, group='health_manager')
cfg.CONF.register_opts(nova_opts, group='nova')
cfg.CONF.register_opts(glance_opts, group='glance')
cfg.CONF.register_opts(neutron_opts, group='neutron')
+cfg.CONF.register_opts(quota_opts, group='quotas')


# Ensure that the control exchange is set correctly
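Once registered, any service that has loaded octavia.common.config reads these like other oslo.config options, for example:

    from oslo_config import cfg

    CONF = cfg.CONF
    # -1 (QUOTA_UNLIMITED) unless overridden in octavia.conf
    CONF.quotas.default_load_balancer_quota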
@@ -161,6 +161,7 @@ AMPS_DATA = 'amps_data'
NICS = 'nics'
VIP = 'vip'
POOL = 'pool'
+POOL_CHILD_COUNT = 'pool_child_count'
POOL_ID = 'pool_id'
L7POLICY = 'l7policy'
L7RULE = 'l7rule'

@@ -310,6 +311,9 @@ NO_CHECK = 'no check'

HAPROXY_MEMBER_STATUSES = (UP, DOWN, NO_CHECK)

+# Quota Constants
+QUOTA_UNLIMITED = -1
+
API_VERSION = '0.5'

HAPROXY_BASE_PEER_PORT = 1025
@@ -607,3 +607,30 @@ class L7Policy(BaseDataModel):
            if p.id == self.id:
                self.listener.l7policies.remove(p)
                break
+
+
+class Quotas(BaseDataModel):
+
+    def __init__(self,
+                 project_id=None,
+                 load_balancer=None,
+                 listener=None,
+                 pool=None,
+                 health_monitor=None,
+                 member=None,
+                 in_use_health_monitor=None,
+                 in_use_listener=None,
+                 in_use_load_balancer=None,
+                 in_use_member=None,
+                 in_use_pool=None):
+        self.project_id = project_id
+        self.health_monitor = health_monitor
+        self.listener = listener
+        self.load_balancer = load_balancer
+        self.pool = pool
+        self.member = member
+        self.in_use_health_monitor = in_use_health_monitor
+        self.in_use_listener = in_use_listener
+        self.in_use_load_balancer = in_use_load_balancer
+        self.in_use_member = in_use_member
+        self.in_use_pool = in_use_pool
@@ -149,7 +149,7 @@ class ImmutableObject(APIException):


class TooManyL7RulesOnL7Policy(APIException):
-    message = _("Too many rules on L7 policy %(id)s")
+    msg = _("Too many rules on L7 policy %(id)s")
    code = 409


@@ -173,8 +173,8 @@ class ComputeGetInterfaceException(OctaviaException):
    message = _('Failed to retrieve compute virtual interfaces.')


-class IDAlreadyExists(OctaviaException):
-    message = _('Already an entity with that specified id.')
+class IDAlreadyExists(APIException):
+    msg = _('Already an entity with that specified id.')
+    code = 409


@@ -204,12 +204,12 @@ class InvalidTopology(OctaviaException):

# L7 policy and rule exceptions
class InvalidL7PolicyAction(APIException):
-    message = _('Invalid L7 Policy action specified: %(action)s')
+    msg = _('Invalid L7 Policy action specified: %(action)s')
    code = 400


class InvalidL7PolicyArgs(APIException):
-    message = _('Invalid L7 Policy arguments: %(msg)s')
+    msg = _('Invalid L7 Policy arguments: %(msg)s')
    code = 400


@@ -235,3 +235,17 @@ class ServerGroupObjectCreateException(OctaviaException):

class ServerGroupObjectDeleteException(OctaviaException):
    message = _('Failed to delete server group object.')
+
+
+class QuotaException(APIException):
+    msg = _('Quota has been met.')
+    code = 403
+
+
+class ProjectBusyException(APIException):
+    msg = _('Project busy. Unable to lock the project. Please try again.')
+    code = 503
+
+
+class MissingProjectID(OctaviaException):
+    message = _('Missing project ID in request where one is required.')
@@ -66,6 +66,8 @@ class HealthMonitorFlows(object):
            requires=constants.POOL_ID))
        delete_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB(
            requires=constants.HEALTH_MON))
+        delete_hm_flow.add(database_tasks.DecrementHealthMonitorQuota(
+            requires=constants.HEALTH_MON))
        delete_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

@@ -77,6 +77,8 @@ class ListenerFlows(object):
            requires=constants.LOADBALANCER))
        delete_listener_flow.add(database_tasks.DeleteListenerInDB(
            requires=constants.LISTENER))
+        delete_listener_flow.add(database_tasks.DecrementListenerQuota(
+            requires=constants.LISTENER))
        delete_listener_flow.add(database_tasks.MarkLBActiveInDB(
            requires=constants.LOADBALANCER))

@@ -96,8 +98,11 @@ class ListenerFlows(object):
        delete_listener_flow.add(database_tasks.DeleteListenerInDB(
            name='delete_listener_in_db_' + listener_name,
            requires=constants.LISTENER,
-            rebind={constants.LISTENER: listener_name}
-        ))
+            rebind={constants.LISTENER: listener_name}))
+        delete_listener_flow.add(database_tasks.DecrementListenerQuota(
+            name='decrement_listener_quota_' + listener_name,
+            requires=constants.LISTENER,
+            rebind={constants.LISTENER: listener_name}))

        return delete_listener_flow

@@ -232,6 +232,8 @@ class LoadBalancerFlows(object):
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(database_tasks.MarkLBDeletedInDB(
            requires=constants.LOADBALANCER))
+        delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota(
+            requires=constants.LOADBALANCER))

        return (delete_LB_flow, store)

@@ -285,6 +287,8 @@ class LoadBalancerFlows(object):
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(database_tasks.MarkLBDeletedInDB(
            requires=constants.LOADBALANCER))
+        delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota(
+            requires=constants.LOADBALANCER))

        return (delete_LB_flow, store)

@@ -75,6 +75,8 @@ class MemberFlows(object):
            requires=constants.MEMBER))
        delete_member_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
+        delete_member_flow.add(database_tasks.DecrementMemberQuota(
+            requires=constants.MEMBER))
        delete_member_flow.add(database_tasks.
                               MarkLBAndListenersActiveInDB(
                                   requires=[constants.LOADBALANCER,

@@ -57,12 +57,16 @@ class PoolFlows(object):
                      constants.LOADBALANCER]))
        delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB(
            requires=constants.POOL))
+        delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota(
+            requires=constants.POOL, provides=constants.POOL_CHILD_COUNT))
        delete_pool_flow.add(model_tasks.DeleteModelObject(
            rebind={constants.OBJECT: constants.POOL}))
        delete_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        delete_pool_flow.add(database_tasks.DeletePoolInDB(
            requires=constants.POOL))
+        delete_pool_flow.add(database_tasks.DecrementPoolQuota(
+            requires=[constants.POOL, constants.POOL_CHILD_COUNT]))
        delete_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

@@ -80,6 +84,10 @@ class PoolFlows(object):
            name='delete_pool_in_db_' + name,
            requires=constants.POOL,
            rebind={constants.POOL: name}))
+        delete_pool_flow.add(database_tasks.DecrementPoolQuota(
+            name='decrement_pool_quota_' + name,
+            requires=constants.POOL,
+            rebind={constants.POOL: name}))

        return delete_pool_flow
@@ -17,7 +17,9 @@ import logging

from oslo_config import cfg
from oslo_db import exception as odb_exceptions
+from oslo_utils import excutils
from oslo_utils import uuidutils
+import six
import sqlalchemy
from sqlalchemy.orm import exc
from taskflow import task

@@ -2201,3 +2203,369 @@ class MarkPoolPendingUpdateInDB(BaseDatabaseTask):
        LOG.warning(_LW("Reverting mark pool pending update in DB "
                        "for pool id %s"), pool.id)
        self.task_utils.mark_pool_prov_status_error(pool.id)
+
+
+class DecrementHealthMonitorQuota(BaseDatabaseTask):
+    """Decrements the health monitor quota for a project.
+
+    Since sqlalchemy will likely retry by itself, always revert if it fails.
+    """
+
+    def execute(self, health_mon):
+        """Decrements the health monitor quota.
+
+        :param health_mon: The health monitor to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.debug("Decrementing health monitor quota for "
+                  "project: %s ", health_mon.project_id)
+
+        lock_session = db_apis.get_session(autocommit=False)
+        try:
+            self.repos.decrement_quota(lock_session,
+                                       data_models.HealthMonitor,
+                                       health_mon.project_id)
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE('Failed to decrement health monitor quota for '
+                              'project: {proj} the project may have excess '
+                              'quota in use.').format(
+                                  proj=health_mon.project_id))
+                lock_session.rollback()
+
+    def revert(self, health_mon, result, *args, **kwargs):
+        """Re-apply the quota
+
+        :param health_mon: The health monitor to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.warning(_LW('Reverting decrement quota for health monitor '
+                        'on project {proj} Project quota counts may be '
+                        'incorrect.').format(proj=health_mon.project_id))
+
+        # Increment the quota back if this task wasn't the failure
+        if not isinstance(result, failure.Failure):
+
+            try:
+                session = db_apis.get_session()
+                lock_session = db_apis.get_session(autocommit=False)
+                try:
+                    self.repos.check_quota_met(session,
+                                               lock_session,
+                                               data_models.HealthMonitor,
+                                               health_mon.project_id)
+                    lock_session.commit()
+                except Exception:
+                    lock_session.rollback()
+            except Exception:
+                # Don't fail the revert flow
+                pass
+
+
+class DecrementListenerQuota(BaseDatabaseTask):
+    """Decrements the listener quota for a project.
+
+    Since sqlalchemy will likely retry by itself, always revert if it fails.
+    """
+
+    def execute(self, listener):
+        """Decrements the listener quota.
+
+        :param listener: The listener to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.debug("Decrementing listener quota for "
+                  "project: %s ", listener.project_id)
+
+        lock_session = db_apis.get_session(autocommit=False)
+        try:
+            self.repos.decrement_quota(lock_session,
+                                       data_models.Listener,
+                                       listener.project_id)
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE('Failed to decrement listener quota for '
+                              'project: {proj} the project may have excess '
+                              'quota in use.').format(
+                                  proj=listener.project_id))
+                lock_session.rollback()
+
+    def revert(self, listener, result, *args, **kwargs):
+        """Re-apply the quota
+
+        :param listener: The listener to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.warning(_LW('Reverting decrement quota for listener '
+                        'on project {proj} Project quota counts may be '
+                        'incorrect.').format(proj=listener.project_id))
+
+        # Increment the quota back if this task wasn't the failure
+        if not isinstance(result, failure.Failure):
+
+            try:
+                session = db_apis.get_session()
+                lock_session = db_apis.get_session(autocommit=False)
+                try:
+                    self.repos.check_quota_met(session,
+                                               lock_session,
+                                               data_models.Listener,
+                                               listener.project_id)
+                    lock_session.commit()
+                except Exception:
+                    lock_session.rollback()
+            except Exception:
+                # Don't fail the revert flow
+                pass
+
+
+class DecrementLoadBalancerQuota(BaseDatabaseTask):
+    """Decrements the load balancer quota for a project.
+
+    Since sqlalchemy will likely retry by itself, always revert if it fails.
+    """
+
+    def execute(self, loadbalancer):
+        """Decrements the load balancer quota.
+
+        :param loadbalancer: The load balancer to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.debug("Decrementing load balancer quota for "
+                  "project: %s ", loadbalancer.project_id)
+
+        lock_session = db_apis.get_session(autocommit=False)
+        try:
+            self.repos.decrement_quota(lock_session,
+                                       data_models.LoadBalancer,
+                                       loadbalancer.project_id)
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE('Failed to decrement load balancer quota for '
+                              'project: {proj} the project may have excess '
+                              'quota in use.').format(
+                                  proj=loadbalancer.project_id))
+                lock_session.rollback()
+
+    def revert(self, loadbalancer, result, *args, **kwargs):
+        """Re-apply the quota
+
+        :param loadbalancer: The load balancer to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.warning(_LW('Reverting decrement quota for load balancer '
+                        'on project {proj} Project quota counts may be '
+                        'incorrect.').format(proj=loadbalancer.project_id))
+
+        # Increment the quota back if this task wasn't the failure
+        if not isinstance(result, failure.Failure):
+
+            try:
+                session = db_apis.get_session()
+                lock_session = db_apis.get_session(autocommit=False)
+                try:
+                    self.repos.check_quota_met(session,
+                                               lock_session,
+                                               data_models.LoadBalancer,
+                                               loadbalancer.project_id)
+                    lock_session.commit()
+                except Exception:
+                    lock_session.rollback()
+            except Exception:
+                # Don't fail the revert flow
+                pass
+
+
+class DecrementMemberQuota(BaseDatabaseTask):
+    """Decrements the member quota for a project.
+
+    Since sqlalchemy will likely retry by itself, always revert if it fails.
+    """
+
+    def execute(self, member):
+        """Decrements the member quota.
+
+        :param member: The member to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.debug("Decrementing member quota for "
+                  "project: %s ", member.project_id)
+
+        lock_session = db_apis.get_session(autocommit=False)
+        try:
+            self.repos.decrement_quota(lock_session,
+                                       data_models.Member,
+                                       member.project_id)
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE('Failed to decrement member quota for '
+                              'project: {proj} the project may have excess '
+                              'quota in use.').format(
+                                  proj=member.project_id))
+                lock_session.rollback()
+
+    def revert(self, member, result, *args, **kwargs):
+        """Re-apply the quota
+
+        :param member: The member to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.warning(_LW('Reverting decrement quota for member '
+                        'on project {proj} Project quota counts may be '
+                        'incorrect.').format(proj=member.project_id))
+
+        # Increment the quota back if this task wasn't the failure
+        if not isinstance(result, failure.Failure):
+
+            try:
+                session = db_apis.get_session()
+                lock_session = db_apis.get_session(autocommit=False)
+                try:
+                    self.repos.check_quota_met(session,
+                                               lock_session,
+                                               data_models.Member,
+                                               member.project_id)
+                    lock_session.commit()
+                except Exception:
+                    lock_session.rollback()
+            except Exception:
+                # Don't fail the revert flow
+                pass
+
+
+class DecrementPoolQuota(BaseDatabaseTask):
+    """Decrements the pool quota for a project.
+
+    Since sqlalchemy will likely retry by itself, always revert if it fails.
+    """
+
+    def execute(self, pool, pool_child_count):
+        """Decrements the pool quota.
+
+        :param pool: The pool to decrement the quota on
+        :returns: None
+        """
+
+        LOG.debug("Decrementing pool quota for "
+                  "project: %s ", pool.project_id)
+
+        lock_session = db_apis.get_session(autocommit=False)
+        try:
+            self.repos.decrement_quota(lock_session,
+                                       data_models.Pool,
+                                       pool.project_id)
+
+            # Pools cascade delete members and health monitors
+            # update the quota for those items as well.
+            if pool_child_count['HM'] > 0:
+                self.repos.decrement_quota(lock_session,
+                                           data_models.HealthMonitor,
+                                           pool.project_id)
+            if pool_child_count['member'] > 0:
+                self.repos.decrement_quota(
+                    lock_session, data_models.Member,
+                    pool.project_id, quantity=pool_child_count['member'])
+
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE('Failed to decrement pool quota for '
+                              'project: {proj} the project may have excess '
+                              'quota in use.').format(
+                                  proj=pool.project_id))
+                lock_session.rollback()
+
+    def revert(self, pool, pool_child_count, result, *args, **kwargs):
+        """Re-apply the quota
+
+        :param pool: The pool to decrement the quota on
+        :returns: None
+        """
+
+        LOG.warning(_LW('Reverting decrement quota for pool '
+                        'on project {proj} Project quota counts may be '
+                        'incorrect.').format(proj=pool.project_id))
+
+        # Increment the quota back if this task wasn't the failure
+        if not isinstance(result, failure.Failure):
+
+            # These are all independent to maximize the correction
+            # in case other quota actions have occurred
+            try:
+                session = db_apis.get_session()
+                lock_session = db_apis.get_session(autocommit=False)
+                try:
+                    self.repos.check_quota_met(session,
+                                               lock_session,
+                                               data_models.Pool,
+                                               pool.project_id)
+                    lock_session.commit()
+                except Exception:
+                    lock_session.rollback()
+
+                # Attempt to increment back the health monitor quota
+                if pool_child_count['HM'] > 0:
+                    lock_session = db_apis.get_session(autocommit=False)
+                    try:
+                        self.repos.check_quota_met(session,
+                                                   lock_session,
+                                                   data_models.HealthMonitor,
+                                                   pool.project_id)
+                        lock_session.commit()
+                    except Exception:
+                        lock_session.rollback()
+
+                # Attempt to increment back the member quota
+                # These are separate calls to maximize the correction
+                # should other factors have increased the in use quota
+                # before this point in the revert flow
+                for i in six.moves.range(pool_child_count['member']):
+                    lock_session = db_apis.get_session(autocommit=False)
+                    try:
+                        self.repos.check_quota_met(session,
+                                                   lock_session,
+                                                   data_models.Member,
+                                                   pool.project_id)
+                        lock_session.commit()
+                    except Exception:
+                        lock_session.rollback()
+            except Exception:
+                # Don't fail the revert flow
+                pass
+
+
+class CountPoolChildrenForQuota(BaseDatabaseTask):
+    """Counts the pool child resources for quota management.
+
+    Since the children of pools are cleaned up by the sqlalchemy
+    cascade delete settings, we need to collect the quota counts
+    for the child objects early.
+
+    """
+
+    def execute(self, pool):
+        """Count the pool child resources for quota management
+
+        :param pool: The pool to count children on
+        :returns: A dict of the health monitor and member counts
+        """
+
+        LOG.debug("Counting pool children for "
+                  "project: %s ", pool.project_id)
+
+        health_mon_count = 1 if pool.health_monitor else 0
+        member_count = len(pool.members)
+
+        return {'HM': health_mon_count, 'member': member_count}
@@ -30,7 +30,8 @@ def get_engine():
    return facade.get_engine()


-def get_session(expire_on_commit=True):
+def get_session(expire_on_commit=True, autocommit=True):
    """Helper method to grab session."""
    facade = _create_facade_lazily()
-    return facade.get_session(expire_on_commit=expire_on_commit)
+    return facade.get_session(expire_on_commit=expire_on_commit,
+                              autocommit=autocommit)
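The two session flavors this enables, side by side (a sketch):

    from octavia.db import api as db_api

    session = db_api.get_session()                 # autocommit, as before
    lock_session = db_api.get_session(autocommit=False)
    # ... work on lock_session ...
    lock_session.commit()                          # caller owns the transaction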
@@ -43,6 +43,8 @@ class OctaviaBase(models.ModelBase):
        elif obj.__class__.__name__ in ['SNI']:
            return (obj.__class__.__name__ +
                    obj.listener_id + obj.tls_container_id)
+        elif obj.__class__.__name__ in ['Quotas']:
+            return obj.__class__.__name__ + obj.project_id
        else:
            raise NotImplementedError
(new migration file, 45 lines)
@@ -0,0 +1,45 @@
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""create quotas table

Revision ID: 3f8ff3be828e
Revises: 44a2414dd683
Create Date: 2016-09-01 13:59:20.723621

"""

# revision identifiers, used by Alembic.
revision = '3f8ff3be828e'
down_revision = '44a2414dd683'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.create_table(
        u'quotas',
        sa.Column(u'project_id', sa.String(36), primary_key=True,
                  nullable=False),
        sa.Column(u'health_monitor', sa.Integer(), nullable=True),
        sa.Column(u'load_balancer', sa.Integer(), nullable=True),
        sa.Column(u'listener', sa.Integer(), nullable=True),
        sa.Column(u'member', sa.Integer(), nullable=True),
        sa.Column(u'pool', sa.Integer(), nullable=True),
        sa.Column(u'in_use_health_monitor', sa.Integer(), nullable=True),
        sa.Column(u'in_use_load_balancer', sa.Integer(), nullable=True),
        sa.Column(u'in_use_listener', sa.Integer(), nullable=True),
        sa.Column(u'in_use_member', sa.Integer(), nullable=True),
        sa.Column(u'in_use_pool', sa.Integer(), nullable=True),
    )
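Deployers pick the new table up with the usual migration step; assuming the standard octavia-db-manage wrapper around Alembic:

    octavia-db-manage upgrade head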
@@ -553,3 +553,22 @@ class L7Policy(base_models.BASE, base_models.IdMixin, base_models.NameMixin):
        sa.ForeignKey("provisioning_status.name",
                      name="fk_l7policy_provisioning_status_name"),
        nullable=True)
+
+
+class Quotas(base_models.BASE):
+
+    __data_model__ = data_models.Quotas
+
+    __tablename__ = "quotas"
+
+    project_id = sa.Column(sa.String(36), primary_key=True)
+    health_monitor = sa.Column(sa.Integer(), nullable=True)
+    listener = sa.Column(sa.Integer(), nullable=True)
+    load_balancer = sa.Column(sa.Integer(), nullable=True)
+    member = sa.Column(sa.Integer(), nullable=True)
+    pool = sa.Column(sa.Integer(), nullable=True)
+    in_use_health_monitor = sa.Column(sa.Integer(), nullable=True)
+    in_use_listener = sa.Column(sa.Integer(), nullable=True)
+    in_use_load_balancer = sa.Column(sa.Integer(), nullable=True)
+    in_use_member = sa.Column(sa.Integer(), nullable=True)
+    in_use_pool = sa.Column(sa.Integer(), nullable=True)
@@ -21,17 +21,25 @@ reference
import datetime

from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
import six

from octavia.common import constants
from octavia.common import constants as consts
from octavia.common import data_models
from octavia.common import exceptions
from octavia.common import validate
from octavia.db import api as db_api
from octavia.db import models
from octavia.i18n import _LE, _LW


CONF = cfg.CONF

LOG = logging.getLogger(__name__)


class BaseRepository(object):
    model_class = None

@@ -135,6 +143,7 @@ class Repositories(object):
        self.vrrpgroup = VRRPGroupRepository()
        self.l7rule = L7RuleRepository()
        self.l7policy = L7PolicyRepository()
+        self.quotas = QuotasRepository()

    def create_load_balancer_and_vip(self, session, lb_dict, vip_dict):
        """Inserts load balancer and vip entities into the database.

@@ -237,67 +246,368 @@ class Repositories(object):
                    provisioning_status=listener_prov_status)
        return success

+    def check_quota_met(self, session, lock_session, _class, project_id):
+        """Checks and updates object quotas.
+
+        This method makes sure the project has available quota
+        for the resource and updates the quota to reflect the
+        new usage.
+
+        :param session: Context database session
+        :param lock_session: Locking database session (autocommit=False)
+        :param _class: Data model object requesting quota
+        :param project_id: Project ID requesting quota
+        :returns: True if the quota is met (the request should be rejected),
+                  False if quota was available and the in-use count was
+                  incremented
+        """
+        LOG.debug('Checking quota for project: {proj} object: {obj}'.format(
+            proj=project_id, obj=str(_class)))
+
+        # Under noauth everything is admin, so no quota
+        if CONF.auth_strategy == consts.NOAUTH:
+            LOG.debug('Auth strategy is NOAUTH, skipping quota check.')
+            return False
+
+        if not project_id:
+            raise exceptions.MissingProjectID()
+
+        quotas = self.quotas.get(session, project_id=project_id)
+        if not quotas:
+            # Make sure we have a record to lock
+            quotas = self.quotas.update(
+                session,
+                project_id,
+                quota={})
+            session.flush()
+        # Lock the project record in the database to block other quota checks
+        #
+        # Note: You cannot just use the current count as the in-use
+        # value as we don't want to lock the whole resource table
+        try:
+            quotas = lock_session.query(models.Quotas).filter_by(
+                project_id=project_id).with_for_update().first()
+            if _class == data_models.LoadBalancer:
+                # Decide which quota to use
+                if quotas.load_balancer is None:
+                    lb_quota = CONF.quotas.default_load_balancer_quota
+                else:
+                    lb_quota = quotas.load_balancer
+                # Get the current in use count
+                if not quotas.in_use_load_balancer:
+                    # This is to handle the upgrade case
+                    lb_count = session.query(models.LoadBalancer).filter(
+                        models.LoadBalancer.project_id == project_id,
+                        models.LoadBalancer.provisioning_status !=
+                        consts.DELETED).count() + 1
+                else:
+                    lb_count = quotas.in_use_load_balancer + 1
+                # Decide if the quota is met
+                if lb_count <= lb_quota or lb_quota == consts.QUOTA_UNLIMITED:
+                    quotas.in_use_load_balancer = lb_count
+                    return False
+                else:
+                    return True
+            if _class == data_models.Listener:
+                # Decide which quota to use
+                if quotas.listener is None:
+                    listener_quota = CONF.quotas.default_listener_quota
+                else:
+                    listener_quota = quotas.listener
+                # Get the current in use count
+                if not quotas.in_use_listener:
+                    # This is to handle the upgrade case
+                    listener_count = session.query(models.Listener).filter(
+                        models.Listener.project_id == project_id,
+                        models.Listener.provisioning_status !=
+                        consts.DELETED).count() + 1
+                else:
+                    listener_count = quotas.in_use_listener + 1
+                # Decide if the quota is met
+                if (listener_count <= listener_quota or
+                        listener_quota == consts.QUOTA_UNLIMITED):
+                    quotas.in_use_listener = listener_count
+                    return False
+                else:
+                    return True
+            if _class == data_models.Pool:
+                # Decide which quota to use
+                if quotas.pool is None:
+                    pool_quota = CONF.quotas.default_pool_quota
+                else:
+                    pool_quota = quotas.pool
+                # Get the current in use count
+                if not quotas.in_use_pool:
+                    # This is to handle the upgrade case
+                    pool_count = session.query(models.Pool).filter(
+                        models.Pool.project_id == project_id,
+                        models.Pool.provisioning_status !=
+                        consts.DELETED).count() + 1
+                else:
+                    pool_count = quotas.in_use_pool + 1
+                # Decide if the quota is met
+                if (pool_count <= pool_quota or
+                        pool_quota == consts.QUOTA_UNLIMITED):
+                    quotas.in_use_pool = pool_count
+                    return False
+                else:
+                    return True
+            if _class == data_models.HealthMonitor:
+                # Decide which quota to use
+                if quotas.health_monitor is None:
+                    hm_quota = CONF.quotas.default_health_monitor_quota
+                else:
+                    hm_quota = quotas.health_monitor
+                # Get the current in use count
+                if not quotas.in_use_health_monitor:
+                    # This is to handle the upgrade case
+                    hm_count = session.query(models.HealthMonitor).filter(
+                        models.HealthMonitor.project_id == project_id,
+                        models.HealthMonitor.provisioning_status !=
+                        consts.DELETED).count() + 1
+                else:
+                    hm_count = quotas.in_use_health_monitor + 1
+                # Decide if the quota is met
+                if (hm_count <= hm_quota or
+                        hm_quota == consts.QUOTA_UNLIMITED):
+                    quotas.in_use_health_monitor = hm_count
+                    return False
+                else:
+                    return True
+            if _class == data_models.Member:
+                # Decide which quota to use
+                if quotas.member is None:
+                    member_quota = CONF.quotas.default_member_quota
+                else:
+                    member_quota = quotas.member
+                # Get the current in use count
+                if not quotas.in_use_member:
+                    # This is to handle the upgrade case
+                    member_count = session.query(models.Member).filter(
+                        models.Member.project_id == project_id,
|
||||
models.Member.provisioning_status !=
|
||||
consts.DELETED).count() + 1
|
||||
else:
|
||||
member_count = quotas.in_use_member + 1
|
||||
# Decide if the quota is met
|
||||
if (member_count <= member_quota or
|
||||
member_quota == consts.QUOTA_UNLIMITED):
|
||||
quotas.in_use_member = member_count
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
except db_exception.DBDeadlock:
|
||||
LOG.warning(_LW('Quota project lock timed out for project: '
|
||||
'{proj}').format(proj=project_id))
|
||||
raise exceptions.ProjectBusyException()
|
||||
return False
|
||||
|
||||
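For orientation, the API-side callers consume check_quota_met in the pattern sketched below; create_load_balancer_tree later in this file is the in-tree version of the same flow. This is an illustrative sketch only, and the repos, session, and project_id names are assumptions, not part of this diff:

    lock_session = db_api.get_session(autocommit=False)
    try:
        # True means the project is already at its listener quota.
        if repos.check_quota_met(session, lock_session,
                                 data_models.Listener, project_id):
            raise exceptions.QuotaException
        # ... create the listener and its children on lock_session ...
        lock_session.commit()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()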
    def decrement_quota(self, lock_session, _class, project_id, quantity=1):
        """Decrements the object quota for a project.

        :param lock_session: Locking database session (autocommit=False)
        :param _class: Data model object to decrement quota
        :param project_id: Project ID to decrement quota on
        :param quantity: Quantity of quota to decrement
        :returns: None
        """
        LOG.debug('Decrementing quota by: {quant} for project: {proj} '
                  'object: {obj}'.format(quant=quantity, proj=project_id,
                                         obj=str(_class)))

        if not project_id:
            raise exceptions.MissingProjectID()

        # Lock the project record in the database to block other quota checks
        try:
            quotas = lock_session.query(models.Quotas).filter_by(
                project_id=project_id).with_for_update().first()
            if not quotas:
                if not CONF.auth_strategy == consts.NOAUTH:
                    LOG.error(_LE(
                        'Quota decrement on {clss} called on project: {proj} '
                        'with no quota record in the database.').format(
                            clss=type(_class), proj=project_id))
                return
            if _class == data_models.LoadBalancer:
                if quotas.in_use_load_balancer > 0:
                    quotas.in_use_load_balancer = (
                        quotas.in_use_load_balancer - quantity)
                else:
                    if not CONF.auth_strategy == consts.NOAUTH:
                        LOG.warning(_LW(
                            'Quota decrement on {clss} called on project: '
                            '{proj} that would cause a negative '
                            'quota.').format(clss=type(_class),
                                             proj=project_id))
            if _class == data_models.Listener:
                if quotas.in_use_listener > 0:
                    quotas.in_use_listener = (
                        quotas.in_use_listener - quantity)
                else:
                    if not CONF.auth_strategy == consts.NOAUTH:
                        LOG.warning(_LW(
                            'Quota decrement on {clss} called on project: '
                            '{proj} that would cause a negative '
                            'quota.').format(clss=type(_class),
                                             proj=project_id))
            if _class == data_models.Pool:
                if quotas.in_use_pool > 0:
                    quotas.in_use_pool = (
                        quotas.in_use_pool - quantity)
                else:
                    if not CONF.auth_strategy == consts.NOAUTH:
                        LOG.warning(_LW(
                            'Quota decrement on {clss} called on project: '
                            '{proj} that would cause a negative '
                            'quota.').format(clss=type(_class),
                                             proj=project_id))
            if _class == data_models.HealthMonitor:
                if quotas.in_use_health_monitor > 0:
                    quotas.in_use_health_monitor = (
                        quotas.in_use_health_monitor - quantity)
                else:
                    if not CONF.auth_strategy == consts.NOAUTH:
                        LOG.warning(_LW(
                            'Quota decrement on {clss} called on project: '
                            '{proj} that would cause a negative '
                            'quota.').format(clss=type(_class),
                                             proj=project_id))
            if _class == data_models.Member:
                if quotas.in_use_member > 0:
                    quotas.in_use_member = (
                        quotas.in_use_member - quantity)
                else:
                    if not CONF.auth_strategy == consts.NOAUTH:
                        LOG.warning(_LW(
                            'Quota decrement on {clss} called on project: '
                            '{proj} that would cause a negative '
                            'quota.').format(clss=type(_class),
                                             proj=project_id))
        except db_exception.DBDeadlock:
            LOG.warning(_LW('Quota project lock timed out for project: '
                            '{proj}').format(proj=project_id))
            raise exceptions.ProjectBusyException()
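On the delete path, a caller would give back the units it holds once the resource is gone. A minimal sketch under the same assumed names as the earlier example (the actual controller wiring is outside this hunk):

    lock_session = db_api.get_session(autocommit=False)
    try:
        # Release one unit of member quota for the deleted member.
        repos.decrement_quota(lock_session, data_models.Member, project_id)
        lock_session.commit()
    except Exception:
        lock_session.rollback()
        raise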
    def create_load_balancer_tree(self, session, lb_dict):
        listener_dicts = lb_dict.pop('listeners', [])
        vip_dict = lb_dict.pop('vip')
        with session.begin(subtransactions=True):
        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.check_quota_met(session,
                                    lock_session,
                                    data_models.LoadBalancer,
                                    lb_dict['project_id']):
                raise exceptions.QuotaException
            lb_dm = self.create_load_balancer_and_vip(
                session, lb_dict, vip_dict)
                lock_session, lb_dict, vip_dict)
            for listener_dict in listener_dicts:
                # Add listener quota check
                if self.check_quota_met(session,
                                        lock_session,
                                        data_models.Listener,
                                        lb_dict['project_id']):
                    raise exceptions.QuotaException
                pool_dict = listener_dict.pop('default_pool', None)
                l7policies_dict = listener_dict.pop('l7policies', None)
                sni_containers = listener_dict.pop('sni_containers', [])
                if pool_dict:
                    # Add pool quota check
                    if self.check_quota_met(session,
                                            lock_session,
                                            data_models.Pool,
                                            lb_dict['project_id']):
                        raise exceptions.QuotaException
                    hm_dict = pool_dict.pop('health_monitor', None)
                    member_dicts = pool_dict.pop('members', [])
                    sp_dict = pool_dict.pop('session_persistence', None)
                    pool_dict['load_balancer_id'] = lb_dm.id
                    del pool_dict['listener_id']
                    pool_dm = self.pool.create(session, **pool_dict)
                    pool_dm = self.pool.create(lock_session, **pool_dict)
                    if sp_dict:
                        sp_dict['pool_id'] = pool_dm.id
                        self.session_persistence.create(session, **sp_dict)
                        self.session_persistence.create(lock_session,
                                                        **sp_dict)
                    if hm_dict:
                        # Add hm quota check
                        if self.check_quota_met(session,
                                                lock_session,
                                                data_models.HealthMonitor,
                                                lb_dict['project_id']):
                            raise exceptions.QuotaException
                        hm_dict['pool_id'] = pool_dm.id
                        self.health_monitor.create(session, **hm_dict)
                        self.health_monitor.create(lock_session, **hm_dict)
                    for r_member_dict in member_dicts:
                        # Add member quota check
                        if self.check_quota_met(session,
                                                lock_session,
                                                data_models.Member,
                                                lb_dict['project_id']):
                            raise exceptions.QuotaException
                        r_member_dict['pool_id'] = pool_dm.id
                        self.member.create(session, **r_member_dict)
                        self.member.create(lock_session, **r_member_dict)
                    listener_dict['default_pool_id'] = pool_dm.id
                self.listener.create(session, **listener_dict)
                self.listener.create(lock_session, **listener_dict)
                for sni_container in sni_containers:
                    self.sni.create(session, **sni_container)
                    self.sni.create(lock_session, **sni_container)
                if l7policies_dict:
                    for policy_dict in l7policies_dict:
                        l7rules_dict = policy_dict.pop('l7rules')
                        if policy_dict.get('redirect_pool'):
                            # Add pool quota check
                            if self.check_quota_met(session,
                                                    lock_session,
                                                    data_models.Pool,
                                                    lb_dict['project_id']):
                                raise exceptions.QuotaException
                            r_pool_dict = policy_dict.pop(
                                'redirect_pool')
                            r_hm_dict = r_pool_dict.pop('health_monitor', None)
                            r_sp_dict = r_pool_dict.pop('session_persistence',
                            r_hm_dict = r_pool_dict.pop('health_monitor',
                                                        None)
                            r_sp_dict = r_pool_dict.pop(
                                'session_persistence', None)
                            r_member_dicts = r_pool_dict.pop('members', [])
                            if 'listener_id' in r_pool_dict.keys():
                                del r_pool_dict['listener_id']
                            r_pool_dm = self.pool.create(
                                session, **r_pool_dict)
                            r_pool_dm = self.pool.create(lock_session,
                                                         **r_pool_dict)
                            if r_sp_dict:
                                r_sp_dict['pool_id'] = r_pool_dm.id
                                self.session_persistence.create(session,
                                                                **r_sp_dict)
                                self.session_persistence.create(
                                    lock_session, **r_sp_dict)
                            if r_hm_dict:
                                # Add hm quota check
                                if self.check_quota_met(
                                        session,
                                        lock_session,
                                        data_models.HealthMonitor,
                                        lb_dict['project_id']):
                                    raise exceptions.QuotaException
                                r_hm_dict['pool_id'] = r_pool_dm.id
                                self.health_monitor.create(session,
                                self.health_monitor.create(lock_session,
                                                           **r_hm_dict)
                            for r_member_dict in r_member_dicts:
                                # Add member quota check
                                if self.check_quota_met(
                                        session,
                                        lock_session,
                                        data_models.Member,
                                        lb_dict['project_id']):
                                    raise exceptions.QuotaException
                                r_member_dict['pool_id'] = r_pool_dm.id
                                self.member.create(session, **r_member_dict)
                                self.member.create(lock_session,
                                                   **r_member_dict)
                        policy_dict['redirect_pool_id'] = r_pool_dm.id
                        policy_dm = self.l7policy.create(session,
                        policy_dm = self.l7policy.create(lock_session,
                                                         **policy_dict)
                        for rule_dict in l7rules_dict:
                            rule_dict['l7policy_id'] = policy_dm.id
                            self.l7rule.create(session, **rule_dict)
                            self.l7rule.create(lock_session, **rule_dict)
            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        session.expire_all()
        return self.load_balancer.get(session, id=lb_dm.id)


@ -320,10 +630,10 @@ class LoadBalancerRepository(BaseRepository):
        with session.begin(subtransactions=True):
            lb = session.query(self.model_class).with_for_update().filter_by(
                id=id).one()
            is_delete = status == constants.PENDING_DELETE
            is_delete = status == consts.PENDING_DELETE
            acceptable_statuses = (
                constants.DELETABLE_STATUSES
                if is_delete else constants.MUTABLE_STATUSES
                consts.DELETABLE_STATUSES
                if is_delete else consts.MUTABLE_STATUSES
            )
            if lb.provisioning_status not in acceptable_statuses:
                return False
@ -416,7 +726,7 @@ class ListenerRepository(BaseRepository):
                    listener.peer_port > max_peer_port):
                max_peer_port = listener.peer_port
        if max_peer_port == 0:
            return constants.HAPROXY_BASE_PEER_PORT
            return consts.HAPROXY_BASE_PEER_PORT
        else:
            return max_peer_port + 1

@ -563,7 +873,7 @@ class AmphoraRepository(BaseRepository):
        """
        with session.begin(subtransactions=True):
            count = session.query(self.model_class).filter_by(
                status=constants.AMPHORA_READY, load_balancer_id=None).count()
                status=consts.AMPHORA_READY, load_balancer_id=None).count()

            return count

@ -705,9 +1015,9 @@ class L7RuleRepository(BaseRepository):
            l7rule_dict.update({k: v})
        # Clear out the 'key' attribute for rule types that don't use it.
        if ('type' in l7rule_dict.keys() and
                l7rule_dict['type'] in (constants.L7RULE_TYPE_HOST_NAME,
                                        constants.L7RULE_TYPE_PATH,
                                        constants.L7RULE_TYPE_FILE_TYPE)):
                l7rule_dict['type'] in (consts.L7RULE_TYPE_HOST_NAME,
                                        consts.L7RULE_TYPE_PATH,
                                        consts.L7RULE_TYPE_FILE_TYPE)):
            l7rule_dict['key'] = None
            model_kwargs.update({'key': None})
        validate.l7rule_data(self.model_class(**l7rule_dict))
@ -743,7 +1053,7 @@ class L7PolicyRepository(BaseRepository):

    def _validate_l7policy_pool_data(self, session, l7policy):
        """Does validations on a given L7 policy."""
        if l7policy.action == constants.L7POLICY_ACTION_REDIRECT_TO_POOL:
        if l7policy.action == consts.L7POLICY_ACTION_REDIRECT_TO_POOL:
            session.expire(session.query(models.Listener).filter_by(
                id=l7policy.listener_id).first())
            listener = (session.query(models.Listener).
@ -778,14 +1088,14 @@ class L7PolicyRepository(BaseRepository):

            if l7policy.action:
                model_kwargs.update(action=l7policy.action)
                if l7policy.action == constants.L7POLICY_ACTION_REJECT:
                if l7policy.action == consts.L7POLICY_ACTION_REJECT:
                    model_kwargs.update(redirect_url=None)
                    model_kwargs.update(redirect_pool_id=None)
                elif (l7policy.action ==
                        constants.L7POLICY_ACTION_REDIRECT_TO_URL):
                        consts.L7POLICY_ACTION_REDIRECT_TO_URL):
                    model_kwargs.update(redirect_pool_id=None)
                elif (l7policy.action ==
                        constants.L7POLICY_ACTION_REDIRECT_TO_POOL):
                        consts.L7POLICY_ACTION_REDIRECT_TO_POOL):
                    model_kwargs.update(redirect_url=None)

            l7policy_db.update(model_kwargs)
@ -812,7 +1122,7 @@ class L7PolicyRepository(BaseRepository):
            # This is to work around unexpected / idiotic behavior of the
            # SQLAlchemy orderinglist extension.
            position = model_kwargs.pop('position', None)
            model_kwargs.update(position=constants.MAX_POLICY_POSITION)
            model_kwargs.update(position=consts.MAX_POLICY_POSITION)
            if not model_kwargs.get('id'):
                model_kwargs.update(id=uuidutils.generate_uuid())
            if model_kwargs.get('redirect_pool_id'):
@ -862,3 +1172,35 @@ class L7PolicyRepository(BaseRepository):
            session.refresh(listener)
            listener.l7policies.reorder()
            session.flush()


class QuotasRepository(BaseRepository):
    model_class = models.Quotas

    def update(self, session, project_id, **model_kwargs):
        with session.begin(subtransactions=True):
            kwargs_quota = model_kwargs['quota']
            quotas = session.query(self.model_class).filter_by(
                project_id=project_id).with_for_update().first()
            if not quotas:
                quotas = models.Quotas(project_id=project_id)

            for key, val in six.iteritems(kwargs_quota):
                setattr(quotas, key, val)
            session.add(quotas)
            session.flush()
        return self.get(session, project_id=project_id)

    def delete(self, session, project_id):
        with session.begin(subtransactions=True):
            quotas = session.query(self.model_class).filter_by(
                project_id=project_id).with_for_update().first()
            if not quotas:
                raise exceptions.NotFound(
                    resource=data_models.Quotas._name(), id=project_id)
            quotas.health_monitor = None
            quotas.load_balancer = None
            quotas.listener = None
            quotas.member = None
            quotas.pool = None
            session.flush()
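Note the asymmetry these two methods encode: update() upserts the row, and an explicit None for a resource means "fall back to the configured default", while delete() keeps the row (and its in_use_* counters) and only nulls out the limits. A short illustrative sketch, assuming a session and project_id are already at hand:

    repo = QuotasRepository()
    # Partial update: only the supplied keys change; None reverts that
    # resource to its [quotas] default.
    repo.update(session, project_id, quota={'load_balancer': 10,
                                            'listener': None})
    # 'delete' resets every limit to the default without discarding the
    # in-use accounting.
    repo.delete(session, project_id)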
@ -29,6 +29,9 @@ from octavia.tests.functional.db import base as base_db_test
class BaseAPITest(base_db_test.OctaviaDBTestBase):

    BASE_PATH = '/v1'
    QUOTAS_PATH = '/quotas'
    QUOTA_PATH = QUOTAS_PATH + '/{project_id}'
    QUOTA_DEFAULT_PATH = QUOTAS_PATH + '/{project_id}/default'
    LBS_PATH = '/loadbalancers'
    LB_PATH = LBS_PATH + '/{lb_id}'
    LB_STATS_PATH = LB_PATH + '/stats'
@ -66,7 +69,11 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):
        patcher = mock.patch('octavia.api.v1.handlers.controller_simulator.'
                             'handler.SimulatedControllerHandler')
        self.handler_mock = patcher.start()
        self.check_quota_met_true_mock = mock.patch(
            'octavia.db.repositories.Repositories.check_quota_met',
            return_value=True)
        self.app = self._make_app()
        self.project_id = uuidutils.generate_uuid()

        def reset_pecan():
            patcher.stop()
@ -121,13 +128,14 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):
        return response

    def create_load_balancer(self, vip, **optionals):
        req_dict = {'vip': vip}
        req_dict = {'vip': vip, 'project_id': self.project_id}
        req_dict.update(optionals)
        response = self.post(self.LBS_PATH, req_dict)
        return response.json

    def create_listener(self, lb_id, protocol, protocol_port, **optionals):
        req_dict = {'protocol': protocol, 'protocol_port': protocol_port}
        req_dict = {'protocol': protocol, 'protocol_port': protocol_port,
                    'project_id': self.project_id}
        req_dict.update(optionals)
        path = self.LISTENERS_PATH.format(lb_id=lb_id)
        response = self.post(path, req_dict)
@ -159,7 +167,8 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):

    def create_pool_sans_listener(self, lb_id, protocol, lb_algorithm,
                                  **optionals):
        req_dict = {'protocol': protocol, 'lb_algorithm': lb_algorithm}
        req_dict = {'protocol': protocol, 'lb_algorithm': lb_algorithm,
                    'project_id': self.project_id}
        req_dict.update(optionals)
        path = self.POOLS_PATH.format(lb_id=lb_id)
        response = self.post(path, req_dict)
@ -167,7 +176,8 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):

    def create_pool(self, lb_id, listener_id, protocol, lb_algorithm,
                    **optionals):
        req_dict = {'protocol': protocol, 'lb_algorithm': lb_algorithm}
        req_dict = {'protocol': protocol, 'lb_algorithm': lb_algorithm,
                    'project_id': self.project_id}
        req_dict.update(optionals)
        path = self.DEPRECATED_POOLS_PATH.format(lb_id=lb_id,
                                                 listener_id=listener_id)
@ -176,7 +186,8 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):

    def create_member(self, lb_id, pool_id, ip_address,
                      protocol_port, expect_error=False, **optionals):
        req_dict = {'ip_address': ip_address, 'protocol_port': protocol_port}
        req_dict = {'ip_address': ip_address, 'protocol_port': protocol_port,
                    'project_id': self.project_id}
        req_dict.update(optionals)
        path = self.MEMBERS_PATH.format(lb_id=lb_id, pool_id=pool_id)
        response = self.post(path, req_dict, expect_errors=expect_error)
@ -184,7 +195,8 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):

    def create_member_with_listener(self, lb_id, listener_id, pool_id,
                                    ip_address, protocol_port, **optionals):
        req_dict = {'ip_address': ip_address, 'protocol_port': protocol_port}
        req_dict = {'ip_address': ip_address, 'protocol_port': protocol_port,
                    'project_id': self.project_id}
        req_dict.update(optionals)
        path = self.DEPRECATED_MEMBERS_PATH.format(
            lb_id=lb_id, listener_id=listener_id, pool_id=pool_id)
@ -198,7 +210,8 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):
                    'delay': delay,
                    'timeout': timeout,
                    'fall_threshold': fall_threshold,
                    'rise_threshold': rise_threshold}
                    'rise_threshold': rise_threshold,
                    'project_id': self.project_id}
        req_dict.update(optionals)
        path = self.HM_PATH.format(lb_id=lb_id,
                                   pool_id=pool_id)
@ -212,7 +225,8 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):
                    'delay': delay,
                    'timeout': timeout,
                    'fall_threshold': fall_threshold,
                    'rise_threshold': rise_threshold}
                    'rise_threshold': rise_threshold,
                    'project_id': self.project_id}
        req_dict.update(optionals)
        path = self.DEPRECATED_HM_PATH.format(
            lb_id=lb_id, listener_id=listener_id, pool_id=pool_id)
@ -119,6 +119,14 @@ class TestHealthMonitor(base.BaseAPITest):
                                           1, 1, 1, 1, project_id=pid)
        self.assertEqual(pid, api_hm.get('project_id'))

    def test_create_over_quota(self):
        self.check_quota_met_true_mock.start()
        self.post(self.hm_path,
                  body={'type': constants.HEALTH_MONITOR_HTTP,
                        'delay': 1, 'timeout': 1, 'fall_threshold': 1,
                        'rise_threshold': 1, 'project_id': self.project_id},
                  status=403)

    def test_bad_create(self):
        hm_json = {'name': 'test1'}
        self.post(self.deprecated_hm_path, hm_json, status=400)
@ -256,10 +264,11 @@ class TestHealthMonitor(base.BaseAPITest):
    def test_create_when_lb_pending_update(self):
        self.put(self.LB_PATH.format(lb_id=self.lb.get('id')),
                 body={'name': 'test_name_change'})
        self.post(self.hm_path, body={'type': constants.HEALTH_MONITOR_HTTP,
                                      'delay': 1, 'timeout': 1,
                                      'fall_threshold': 1,
                                      'rise_threshold': 1}, status=409)
        self.post(self.hm_path,
                  body={'type': constants.HEALTH_MONITOR_HTTP,
                        'delay': 1, 'timeout': 1, 'fall_threshold': 1,
                        'rise_threshold': 1, 'project_id': self.project_id},
                  status=409)

    def test_update_when_lb_pending_update(self):
        self.create_health_monitor(self.lb.get('id'), self.pool.get('id'),
@ -279,10 +288,11 @@ class TestHealthMonitor(base.BaseAPITest):

    def test_create_when_lb_pending_delete(self):
        self.delete(self.LB_PATH.format(lb_id=self.lb.get('id')))
        self.post(self.hm_path, body={'type': constants.HEALTH_MONITOR_HTTP,
                                      'delay': 1, 'timeout': 1,
                                      'fall_threshold': 1,
                                      'rise_threshold': 1}, status=409)
        self.post(self.hm_path,
                  body={'type': constants.HEALTH_MONITOR_HTTP,
                        'delay': 1, 'timeout': 1, 'fall_threshold': 1,
                        'rise_threshold': 1, 'project_id': self.project_id},
                  status=409)

    def test_update_when_lb_pending_delete(self):
        self.create_health_monitor(self.lb.get('id'), self.pool.get('id'),
@ -126,7 +126,8 @@ class TestListener(base.BaseAPITest):
                       'default_pool_id': self.pool.get('id'),
                       'description': 'desc1',
                       'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
                       'protocol_port': 80}
                       'protocol_port': 80,
                       'project_id': self.project_id}
        response = self.post(self.listeners_path, lb_listener)
        api_listener = response.json
        self.assertEqual(api_listener.get('default_pool_id'),
@ -137,7 +138,8 @@ class TestListener(base.BaseAPITest):
                       'default_pool_id': uuidutils.generate_uuid(),
                       'description': 'desc1',
                       'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
                       'protocol_port': 80}
                       'protocol_port': 80,
                       'project_id': self.project_id}
        self.post(self.listeners_path, lb_listener, status=404)

    def test_create_with_id(self):
@ -148,12 +150,14 @@ class TestListener(base.BaseAPITest):
                        'default_pool_id': self.pool.get('id'),
                        'description': 'desc1',
                        'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
                        'protocol_port': 80}
                        'protocol_port': 80,
                        'project_id': self.project_id}
        lb_listener2 = {'name': 'listener2',
                        'default_pool_id': self.pool.get('id'),
                        'description': 'desc2',
                        'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
                        'protocol_port': 81}
                        'protocol_port': 81,
                        'project_id': self.project_id}
        listener1 = self.post(self.listeners_path, lb_listener1).json
        self.set_lb_status(self.lb.get('id'), constants.ACTIVE)
        listener2 = self.post(self.listeners_path, lb_listener2).json
@ -178,10 +182,10 @@ class TestListener(base.BaseAPITest):
        defaults = {'name': None, 'default_pool_id': None,
                    'description': None, 'enabled': True,
                    'connection_limit': None, 'tls_certificate_id': None,
                    'sni_containers': [], 'project_id': None,
                    'insert_headers': {}}
                    'sni_containers': [], 'insert_headers': {}}
        lb_listener = {'protocol': constants.PROTOCOL_HTTP,
                       'protocol_port': 80}
                       'protocol_port': 80,
                       'project_id': self.project_id}
        response = self.post(self.listeners_path, lb_listener)
        listener_api = response.json
        extra_expects = {'provisioning_status': constants.PENDING_CREATE,
@ -200,6 +204,13 @@ class TestListener(base.BaseAPITest):
        self.assert_final_listener_statuses(self.lb.get('id'),
                                            listener_api.get('id'))

    def test_create_over_quota(self):
        lb_listener = {'protocol': constants.PROTOCOL_HTTP,
                       'protocol_port': 80,
                       'project_id': self.project_id}
        self.check_quota_met_true_mock.start()
        self.post(self.listeners_path, lb_listener, status=403)

    def test_update(self):
        tls_uuid = uuidutils.generate_uuid()
        listener = self.create_listener(self.lb.get('id'),
@ -259,7 +270,8 @@ class TestListener(base.BaseAPITest):
                                         constants.PROTOCOL_TCP, 80)
        self.set_lb_status(self.lb.get('id'))
        listener2_post = {'protocol': listener1.get('protocol'),
                          'protocol_port': listener1.get('protocol_port')}
                          'protocol_port': listener1.get('protocol_port'),
                          'project_id': self.project_id}
        self.post(self.listeners_path, listener2_post, status=409)

    def test_update_listeners_same_port(self):
@ -329,7 +341,8 @@ class TestListener(base.BaseAPITest):
                                          enabled=False)
        lb_listener = {'name': 'listener1', 'description': 'desc1',
                       'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
                       'protocol_port': 80, 'connection_limit': 10}
                       'protocol_port': 80, 'connection_limit': 10,
                       'project_id': self.project_id}
        self.post(self.LISTENERS_PATH.format(lb_id=lb.get('id')),
                  lb_listener, status=409)

@ -339,7 +352,8 @@ class TestListener(base.BaseAPITest):
        self.set_lb_status(lb.get('id'))
        lb_listener = {'name': 'listener1', 'description': 'desc1',
                       'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
                       'protocol_port': 80, 'connection_limit': 10}
                       'protocol_port': 80, 'connection_limit': 10,
                       'project_id': self.project_id}
        api_listener = self.post(
            self.LISTENERS_PATH.format(lb_id=lb.get('id')), lb_listener).json
        self.delete(self.LISTENER_PATH.format(
@ -352,7 +366,8 @@ class TestListener(base.BaseAPITest):
        self.set_lb_status(lb.get('id'))
        lb_listener = {'name': 'listener1', 'description': 'desc1',
                       'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
                       'protocol_port': 80, 'connection_limit': 10}
                       'protocol_port': 80, 'connection_limit': 10,
                       'project_id': self.project_id}
        api_listener = self.post(
            self.LISTENERS_PATH.format(lb_id=lb.get('id')), lb_listener).json
        self.set_lb_status(lb.get('id'))
@ -367,7 +382,8 @@ class TestListener(base.BaseAPITest):
        self.set_lb_status(lb.get('id'))
        lb_listener = {'name': 'listener1', 'description': 'desc1',
                       'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
                       'protocol_port': 80, 'connection_limit': 10}
                       'protocol_port': 80, 'connection_limit': 10,
                       'project_id': self.project_id}
        api_listener = self.post(
            self.LISTENERS_PATH.format(lb_id=lb.get('id')), lb_listener).json
        self.set_lb_status(lb.get('id'))
@ -382,7 +398,8 @@ class TestListener(base.BaseAPITest):
        self.set_lb_status(lb.get('id'))
        lb_listener = {'name': 'listener1', 'description': 'desc1',
                       'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
                       'protocol_port': 80, 'connection_limit': 10}
                       'protocol_port': 80, 'connection_limit': 10,
                       'project_id': self.project_id}
        api_listener = self.post(
            self.LISTENERS_PATH.format(lb_id=lb.get('id')), lb_listener).json
        self.set_lb_status(lb.get('id'))
@ -418,12 +435,14 @@ class TestListener(base.BaseAPITest):
    def test_create_with_valid_insert_headers(self):
        lb_listener = {'protocol': 'HTTP',
                       'protocol_port': 80,
                       'insert_headers': {'X-Forwarded-For': 'true'}}
                       'insert_headers': {'X-Forwarded-For': 'true'},
                       'project_id': self.project_id}
        self.post(self.listeners_path, lb_listener, status=202)

    def test_create_with_bad_insert_headers(self):
        lb_listener = {'protocol': 'HTTP',
                       'protocol_port': 80,
                       # 'insert_headers': {'x': 'x'}}
                       'insert_headers': {'X-Forwarded-Four': 'true'}}
                       'insert_headers': {'X-Forwarded-Four': 'true'},
                       'project_id': self.project_id}
        self.post(self.listeners_path, lb_listener, status=400)
@ -30,7 +30,7 @@ class TestLoadBalancer(base.BaseAPITest):
        self.assertEqual([], api_list)

    def test_create(self, **optionals):
        lb_json = {'name': 'test1', 'vip': {}}
        lb_json = {'name': 'test1', 'vip': {}, 'project_id': self.project_id}
        lb_json.update(optionals)
        response = self.post(self.LBS_PATH, lb_json)
        api_lb = response.json
@ -62,11 +62,17 @@ class TestLoadBalancer(base.BaseAPITest):
    def test_create_with_project_id(self):
        self.test_create(project_id=uuidutils.generate_uuid())

    def test_create_over_quota(self):
        lb_json = {'name': 'test1', 'vip': {}, 'project_id': self.project_id}
        self.check_quota_met_true_mock.start()
        self.post(self.LBS_PATH, lb_json, status=403)

    def test_get_all(self):
        lb1 = self.create_load_balancer({}, name='lb1')
        lb2 = self.create_load_balancer({}, name='lb2')
        lb3 = self.create_load_balancer({}, name='lb3')
        response = self.get(self.LBS_PATH)
        response = self.get(self.LBS_PATH,
                            params={'project_id': self.project_id})
        lbs = response.json
        lb_id_names = [(lb.get('id'), lb.get('name')) for lb in lbs]
        self.assertEqual(3, len(lbs))
@ -116,7 +122,8 @@ class TestLoadBalancer(base.BaseAPITest):
               'subnet_id': uuidutils.generate_uuid(),
               'port_id': uuidutils.generate_uuid()}
        lb_json = {'name': 'test1', 'description': 'test1_desc',
                   'vip': vip, 'enabled': False}
                   'vip': vip, 'enabled': False,
                   'project_id': self.project_id}
        response = self.post(self.LBS_PATH, lb_json)
        api_lb = response.json
        self.assertTrue(uuidutils.is_uuid_like(api_lb.get('id')))
@ -247,8 +254,10 @@ class TestLoadBalancer(base.BaseAPITest):
            net_mock.return_value.get_subnet = mock.Mock(
                side_effect=network_base.SubnetNotFound('Subnet not found'))
            subnet_id = uuidutils.generate_uuid()
            lb_json = {'name': 'test1', 'vip': {'subnet_id': subnet_id,
                                                'ip_address': '10.0.0.1'}}
            lb_json = {'name': 'test1',
                       'vip': {'subnet_id': subnet_id,
                               'ip_address': '10.0.0.1'},
                       'project_id': self.project_id}
            lb_json.update(optionals)
            response = self.post(self.LBS_PATH, lb_json, expect_errors=True)
            err_msg = 'Subnet ' + subnet_id + ' not found.'
@ -259,8 +268,10 @@ class TestLoadBalancer(base.BaseAPITest):
        with mock.patch(
                'octavia.common.utils.get_network_driver') as net_mock:
            net_mock.return_value.get_subnet.return_value = subnet_id
            lb_json = {'name': 'test1', 'vip': {'subnet_id': subnet_id,
                                                'ip_address': '10.0.0.1'}}
            lb_json = {'name': 'test1',
                       'vip': {'subnet_id': subnet_id,
                               'ip_address': '10.0.0.1'},
                       'project_id': self.project_id}
            lb_json.update(optionals)
            response = self.post(self.LBS_PATH, lb_json)
            api_lb = response.json
@ -176,7 +176,8 @@ class TestMember(base.BaseAPITest):
                                       constants.ACTIVE, constants.ONLINE)

    def test_duplicate_create(self):
        member = {'ip_address': '10.0.0.1', 'protocol_port': 80}
        member = {'ip_address': '10.0.0.1', 'protocol_port': 80,
                  'project_id': self.project_id}
        self.post(self.members_path, member, status=202)
        self.set_lb_status(self.lb.get('id'))
        self.post(self.members_path, member, status=409)
@ -207,6 +208,11 @@ class TestMember(base.BaseAPITest):
        self.assertEqual(80, response.get('protocol_port'))
        self.assertEqual(subnet_id, response.get('subnet_id'))

    def test_create_over_quota(self):
        self.check_quota_met_true_mock.start()
        body = {'ip_address': '10.0.0.3', 'protocol_port': 81}
        self.post(self.members_path, body, status=403)

    def test_update(self):
        old_port = 80
        new_port = 88
@ -340,7 +346,8 @@ class TestMember(base.BaseAPITest):
        self.put(self.LB_PATH.format(lb_id=self.lb.get('id')),
                 body={'name': 'test_name_change'})
        self.post(self.members_path,
                  body={'ip_address': '10.0.0.1', 'protocol_port': 80},
                  body={'ip_address': '10.0.0.1', 'protocol_port': 80,
                        'project_id': self.project_id},
                  status=409)

    def test_update_when_lb_pending_update(self):
@ -370,7 +377,8 @@ class TestMember(base.BaseAPITest):
        self.set_lb_status(self.lb.get('id'))
        self.delete(self.LB_PATH.format(lb_id=self.lb.get('id')))
        self.post(self.members_path,
                  body={'ip_address': '10.0.0.2', 'protocol_port': 88},
                  body={'ip_address': '10.0.0.2', 'protocol_port': 88,
                        'project_id': self.project_id},
                  status=409)

    def test_update_when_lb_pending_delete(self):
@ -177,7 +177,8 @@ class TestPool(base.BaseAPITest):
        path = self.POOLS_PATH.format(lb_id=self.lb.get('id'),
                                      listener_id=self.listener.get('id'))
        body = {'id': pool.get('id'), 'protocol': constants.PROTOCOL_HTTP,
                'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}
                'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
                'project_id': self.project_id}
        self.post(path, body, status=409, expect_errors=True)

    def test_bad_create(self):
@ -199,7 +200,8 @@ class TestPool(base.BaseAPITest):
        path = self.pools_path_deprecated.format(
            lb_id=self.lb.get('id'), listener_id=self.listener.get('id'))
        body = {'protocol': constants.PROTOCOL_HTTP,
                'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}
                'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
                'project_id': self.project_id}
        self.post(path, body, status=409, expect_errors=True)

    def test_create_bad_protocol(self):
@ -221,6 +223,13 @@ class TestPool(base.BaseAPITest):
                                    constants.PENDING_UPDATE,
                                    constants.ERROR)

    def test_create_over_quota(self):
        self.check_quota_met_true_mock.start()
        body = {'protocol': constants.PROTOCOL_HTTP,
                'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
                'project_id': self.project_id}
        self.post(self.pools_path, body, status=403)

    def test_update(self):
        api_pool = self.create_pool(self.lb.get('id'),
                                    self.listener.get('id'),
@ -538,7 +547,8 @@ class TestPool(base.BaseAPITest):
                 body={'name': 'test_name_change'})
        self.post(self.pools_path,
                  body={'protocol': constants.PROTOCOL_HTTP,
                        'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN},
                        'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
                        'project_id': self.project_id},
                  status=409)

    def test_update_when_lb_pending_update(self):
@ -565,7 +575,8 @@ class TestPool(base.BaseAPITest):
        self.delete(self.LB_PATH.format(lb_id=self.lb.get('id')))
        self.post(self.pools_path,
                  body={'protocol': constants.PROTOCOL_HTTP,
                        'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN},
                        'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
                        'project_id': self.project_id},
                  status=409)

    def test_update_when_lb_pending_delete(self):
155 octavia/tests/functional/api/v1/test_quotas.py Normal file
@ -0,0 +1,155 @@
# Copyright 2016 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils

from octavia.common import constants as const
from octavia.tests.functional.api.v1 import base

CONF = cfg.CONF


class TestQuotas(base.BaseAPITest):

    def setUp(self):
        super(TestQuotas, self).setUp()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(
            group="quotas",
            default_load_balancer_quota=random.randrange(const.QUOTA_UNLIMITED,
                                                         9000))
        conf.config(
            group="quotas",
            default_listener_quota=random.randrange(const.QUOTA_UNLIMITED,
                                                    9000))
        conf.config(
            group="quotas",
            default_member_quota=random.randrange(const.QUOTA_UNLIMITED, 9000))
        # We need to make sure unlimited gets tested each pass
        conf.config(group="quotas", default_pool_quota=const.QUOTA_UNLIMITED)
        conf.config(
            group="quotas",
            default_health_monitor_quota=random.randrange(
                const.QUOTA_UNLIMITED, 9000))

        self.project_id = uuidutils.generate_uuid()

    def _assert_quotas_equal(self, observed, expected=None):
        if not expected:
            expected = {'load_balancer':
                        CONF.quotas.default_load_balancer_quota,
                        'listener': CONF.quotas.default_listener_quota,
                        'pool': CONF.quotas.default_pool_quota,
                        'health_monitor':
                        CONF.quotas.default_health_monitor_quota,
                        'member': CONF.quotas.default_member_quota}
        self.assertEqual(expected['load_balancer'], observed['load_balancer'])
        self.assertEqual(expected['listener'], observed['listener'])
        self.assertEqual(expected['pool'], observed['pool'])
        self.assertEqual(expected['health_monitor'],
                         observed['health_monitor'])
        self.assertEqual(expected['member'], observed['member'])

    def test_get_all_quotas_no_quotas(self):
        response = self.get(self.QUOTAS_PATH)
        quota_list = response.json
        self.assertEqual({'quotas': []}, quota_list)

    def test_get_all_quotas_with_quotas(self):
        project_id1 = uuidutils.generate_uuid()
        project_id2 = uuidutils.generate_uuid()
        quota_path1 = self.QUOTA_PATH.format(project_id=project_id1)
        quota1 = {'load_balancer': const.QUOTA_UNLIMITED, 'listener': 30,
                  'pool': 30, 'health_monitor': 30, 'member': 30}
        body1 = {'quota': quota1}
        self.put(quota_path1, body1)
        quota_path2 = self.QUOTA_PATH.format(project_id=project_id2)
        quota2 = {'load_balancer': 50, 'listener': 50, 'pool': 50,
                  'health_monitor': 50, 'member': 50}
        body2 = {'quota': quota2}
        self.put(quota_path2, body2)

        response = self.get(self.QUOTAS_PATH)
        quota_list = response.json

        quota1['project_id'] = project_id1
        quota1['tenant_id'] = project_id1
        quota2['project_id'] = project_id2
        quota2['tenant_id'] = project_id2
        expected = {'quotas': [quota1, quota2]}
        self.assertEqual(expected, quota_list)

    def test_get_default_quotas(self):
        response = self.get(self.QUOTA_DEFAULT_PATH.format(
            project_id=self.project_id))
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'])

    def test_custom_quotas(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        self.put(quota_path, body)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'], expected=body['quota'])

    def test_custom_partial_quotas(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'listener': None, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        expected_body = {'quota': {
            'load_balancer': 30,
            'listener': CONF.quotas.default_listener_quota, 'pool': 30,
            'health_monitor': 30, 'member': 30}}
        self.put(quota_path, body)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'],
                                  expected=expected_body['quota'])

    def test_custom_missing_quotas(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        expected_body = {'quota': {
            'load_balancer': 30,
            'listener': CONF.quotas.default_listener_quota, 'pool': 30,
            'health_monitor': 30, 'member': 30}}
        self.put(quota_path, body)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'],
                                  expected=expected_body['quota'])

    def test_delete_custom_quotas(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        self.put(quota_path, body)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'], expected=body['quota'])
        self.delete(quota_path)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'])

    def test_delete_non_existent_custom_quotas(self):
        quota_path = self.QUOTA_PATH.format(project_id='bogus')
        self.delete(quota_path, status=404)
File diff suppressed because it is too large
Load Diff
@ -15,6 +15,7 @@
from oslo_serialization import jsonutils
from six.moves.urllib import parse
from tempest.lib.common import rest_client
from tempest.lib import exceptions as tempest_exceptions


class LoadBalancersClient(rest_client.RestClient):
@ -53,6 +54,15 @@ class LoadBalancersClient(rest_client.RestClient):
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def create_load_balancer_graph(self, lbgraph):
        """Create a load balancer from a full graph request."""
        url = self._LOAD_BALANCERS_URL
        post_body = jsonutils.dumps(lbgraph)
        resp, body = self.post(url, post_body)
        body = jsonutils.loads(body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def update_load_balancer(self, lb_id, **kwargs):
        """Update a load balancer build."""
        url = self._LOAD_BALANCER_URL.format(lb_id=lb_id)
@ -68,3 +78,15 @@ class LoadBalancersClient(rest_client.RestClient):
        resp, body = self.delete(url)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def create_load_balancer_over_quota(self, **kwargs):
        """Attempt to build a load balancer over quota."""
        url = self._LOAD_BALANCERS_URL
        post_body = jsonutils.dumps(kwargs)
        try:
            resp, body = self.post(url, post_body)
        except tempest_exceptions.Forbidden:
            # This is what we expect to happen
            return
        assert resp.status == 403, "Expected over quota 403 response"
        return rest_client.ResponseBody(resp, body)
59 octavia/tests/tempest/v1/clients/quotas_client.py Normal file
@ -0,0 +1,59 @@
# Copyright 2016 Rackspace US Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_serialization import jsonutils
from six.moves.urllib import parse
from tempest.lib.common import rest_client


class QuotasClient(rest_client.RestClient):
    """Tests Quotas API."""

    _QUOTAS_URL = "v1/{project_id}/quotas"

    def list_quotas(self, params=None):
        """List all non-default quotas."""
        url = "v1/quotas"
        if params:
            url = '{0}?{1}'.format(url, parse.urlencode(params))
        resp, body = self.get(url)
        body = jsonutils.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBodyList(resp, body)

    def get_quotas(self, project_id, params=None):
        """Get quotas for a project."""
        url = self._QUOTAS_URL.format(project_id=project_id)
        if params:
            url = '{0}?{1}'.format(url, parse.urlencode(params))
        resp, body = self.get(url)
        body = jsonutils.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def update_quotas(self, project_id, **kwargs):
        """Update quotas for a project."""
        url = self._QUOTAS_URL.format(project_id=project_id)
        put_body = jsonutils.dumps(kwargs)
        resp, body = self.put(url, put_body)
        body = jsonutils.loads(body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def delete_quotas(self, project_id):
        """Delete quotas for a project."""
        url = self._QUOTAS_URL.format(project_id=project_id)
        resp, body = self.delete(url)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)
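For orientation, a scenario test drives this client roughly as follows (an illustrative sketch; auth_provider and region come from the tempest client manager, as shown in the scenario base class changes below):

    quotas = quotas_client.QuotasClient(auth_provider, 'octavia', region)
    quotas.update_quotas(project_id, quota={'load_balancer': 5})
    current = quotas.get_quotas(project_id)['quota']
    quotas.delete_quotas(project_id)  # revert the project to the defaults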
@ -46,6 +46,7 @@ from octavia.tests.tempest.v1.clients import listeners_client
|
||||
from octavia.tests.tempest.v1.clients import load_balancers_client
|
||||
from octavia.tests.tempest.v1.clients import members_client
|
||||
from octavia.tests.tempest.v1.clients import pools_client
|
||||
from octavia.tests.tempest.v1.clients import quotas_client
|
||||
|
||||
config = config.CONF
|
||||
|
||||
@ -69,15 +70,12 @@ class BaseTestCase(manager.NetworkScenarioTest):
|
||||
self.num = 50
|
||||
self.server_fixed_ips = {}
|
||||
|
||||
self._create_security_group_for_test()
|
||||
self._set_net_and_subnet()
|
||||
|
||||
mgr = self.get_client_manager()
|
||||
|
||||
auth_provider = mgr.auth_provider
|
||||
region = config.network.region or config.identity.region
|
||||
self.client_args = [auth_provider, 'octavia', region]
|
||||
self.networks_client = (
|
||||
self.load_balancers_client = (
|
||||
load_balancers_client.LoadBalancersClient(*self.client_args))
|
||||
self.listeners_client = (
|
||||
listeners_client.ListenersClient(*self.client_args))
|
||||
@ -87,6 +85,10 @@ class BaseTestCase(manager.NetworkScenarioTest):
|
||||
self.health_monitors_client = (
|
||||
health_monitors_client.HealthMonitorsClient(
|
||||
*self.client_args))
|
||||
self.quotas_client = quotas_client.QuotasClient(*self.client_args)
|
||||
|
||||
self._create_security_group_for_test()
|
||||
self._set_net_and_subnet()
|
||||
|
||||
# admin network client needed for assigning octavia port to flip
|
||||
admin_manager = credentials_factory.AdminManager()
|
||||
@ -114,7 +116,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
|
||||
The configured private network and associated subnet is used as a
|
||||
fallback in absence of tenant networking.
|
||||
"""
|
||||
tenant_id = self.networks_client.tenant_id
|
||||
tenant_id = self.load_balancers_client.tenant_id
|
||||
try:
|
||||
tenant_net = self._list_networks(tenant_id=tenant_id)[0]
|
||||
except IndexError:
|
||||
@ -136,7 +138,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
|
||||
|
||||
def _create_security_group_for_test(self):
|
||||
self.security_group = self._create_security_group(
|
||||
tenant_id=self.networks_client.tenant_id)
|
||||
tenant_id=self.load_balancers_client.tenant_id)
|
||||
self._create_security_group_rules_for_port(self.start_port)
|
||||
self._create_security_group_rules_for_port(self.start_port + 1)
|
||||
|
||||
@ -149,11 +151,11 @@ class BaseTestCase(manager.NetworkScenarioTest):
|
||||
}
|
||||
self._create_security_group_rule(
|
||||
secgroup=self.security_group,
|
||||
tenant_id=self.networks_client.tenant_id,
|
||||
tenant_id=self.load_balancers_client.tenant_id,
|
||||
**rule)
|
||||
|
||||
def _ipv6_subnet(self, address6_mode):
|
||||
tenant_id = self.networks_client.tenant_id
|
||||
tenant_id = self.load_balancers_client.tenant_id
|
||||
router = self._get_router(tenant_id=tenant_id)
|
||||
self.network = self._create_network(tenant_id=tenant_id)
|
||||
self.subnet = self._create_subnet(network=self.network,
|
||||
@ -337,7 +339,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
|
||||
|
||||
def _cleanup_load_balancer(self, load_balancer_id):
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
self.networks_client.delete_load_balancer, load_balancer_id)
|
||||
self.load_balancers_client.delete_load_balancer, load_balancer_id)
|
||||
self._wait_for_load_balancer_status(load_balancer_id, delete=True)
|
||||
|
||||
def _cleanup_listener(self, listener_id, load_balancer_id=None):
|
||||
@ -446,8 +448,10 @@ class BaseTestCase(manager.NetworkScenarioTest):
|
||||
:param ip_version: IP version to be used for the VIP IP
|
||||
:returns: ID of the created load balancer
|
||||
"""
|
||||
self.create_lb_kwargs = {'vip': {'subnet_id': self.subnet['id']}}
|
||||
self.load_balancer = self.networks_client.create_load_balancer(
|
||||
self.create_lb_kwargs = {
|
||||
'vip': {'subnet_id': self.subnet['id']},
|
||||
'project_id': self.load_balancers_client.tenant_id}
|
||||
self.load_balancer = self.load_balancers_client.create_load_balancer(
|
||||
**self.create_lb_kwargs)
|
||||
lb_id = self.load_balancer['id']
|
||||
self.addCleanup(self._cleanup_load_balancer, lb_id)
|
||||
@ -479,6 +483,36 @@ class BaseTestCase(manager.NetworkScenarioTest):
|
||||
security_groups=[self.security_group['id']])
|
||||
return lb_id
|
||||
|
||||
def _create_load_balancer_over_quota(self):
|
||||
"""Attempt to create a load balancer over quota.
|
||||
|
||||
Creates two load balancers one after the other expecting
|
||||
the second create to exceed the configured quota.
|
||||
|
||||
:returns: Response body from the request
|
||||
"""
|
||||
self.create_lb_kwargs = {
|
||||
'vip': {'subnet_id': self.subnet['id']},
|
||||
'project_id': self.load_balancers_client.tenant_id}
|
||||
self.load_balancer = self.load_balancers_client.create_load_balancer(
|
||||
**self.create_lb_kwargs)
|
||||
lb_id = self.load_balancer['id']
|
||||
self.addCleanup(self._cleanup_load_balancer, lb_id)
|
||||
|
||||
self.create_lb_kwargs = {
|
||||
'vip': {'subnet_id': self.subnet['id']},
|
||||
'project_id': self.load_balancers_client.tenant_id}
|
||||
lb_client = self.load_balancers_client
|
||||
lb_client.create_load_balancer_over_quota(
|
||||
**self.create_lb_kwargs)
|
||||
|
||||
LOG.info(('Waiting for lb status on create load balancer id: {0}'
|
||||
.format(lb_id)))
|
||||
self.load_balancer = self._wait_for_load_balancer_status(
|
||||
load_balancer_id=lb_id,
|
||||
provisioning_status='ACTIVE',
|
||||
operating_status='ONLINE')
|
||||
|
||||
def _wait_for_load_balancer_status(self, load_balancer_id,
|
||||
provisioning_status='ACTIVE',
|
||||
operating_status='ONLINE',
|
||||
@ -488,7 +522,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
|
||||
end_time = time.time() + timeout
|
||||
while time.time() < end_time:
|
||||
try:
|
||||
lb = self.networks_client.get_load_balancer(
|
||||
lb = self.load_balancers_client.get_load_balancer(
|
||||
load_balancer_id)
|
||||
except lib_exc.NotFound as e:
|
||||
if delete:
|
||||
@ -794,3 +828,76 @@ class BaseTestCase(manager.NetworkScenarioTest):
|
||||
"output {2}, error {3}").format(cmd, proc.returncode,
|
||||
stdout, stderr))
|
||||
return stdout
|
||||
|
||||
def _set_quotas(self, project_id=None, load_balancer=20, listener=20,
|
||||
pool=20, health_monitor=20, member=20):
|
||||
if not project_id:
|
||||
project_id = self.networks_client.tenant_id
|
||||
body = {'quota': {
|
||||
'load_balancer': load_balancer, 'listener': listener,
|
||||
'pool': pool, 'health_monitor': health_monitor, 'member': member}}
|
||||
return self.quotas_client.update_quotas(project_id, **body)
|
||||
|

    def _create_load_balancer_tree(self, ip_version=4):
        # TODO(ptoohill): remove or null out project ID when Octavia supports
        # keystone auth and automatically populates it for us.
        project_id = self.networks_client.tenant_id

        create_members = self._create_members_kwargs(self.subnet['id'])
        create_pool = {'project_id': project_id,
                       'lb_algorithm': 'ROUND_ROBIN',
                       'protocol': 'HTTP',
                       'members': create_members}
        create_listener = {'project_id': project_id,
                           'protocol': 'HTTP',
                           'protocol_port': 80,
                           'default_pool': create_pool}
        create_lb = {'project_id': project_id,
                     'vip': {'subnet_id': self.subnet['id']},
                     'listeners': [create_listener]}

        # Set quotas back and finish the test
        self._set_quotas(project_id=project_id)
        self.load_balancer = (self.load_balancers_client
                              .create_load_balancer_graph(create_lb))

        load_balancer_id = self.load_balancer['id']
        self.addCleanup(self._cleanup_load_balancer, load_balancer_id)
        LOG.info(('Waiting for lb status on create load balancer id: {0}'
                  .format(load_balancer_id)))
        self.load_balancer = self._wait_for_load_balancer_status(
            load_balancer_id)

        self.vip_ip = self.load_balancer['vip'].get('ip_address')

        # If IPv4 is used for the lb, fetch the right values from
        # the tempest.conf file
        if ip_version == 4:
            if (config.network.public_network_id and
                    not config.network.project_networks_reachable):
                load_balancer = self.load_balancer
                self._assign_floating_ip_to_lb_vip(load_balancer)
                self.vip_ip = self.floating_ips[
                    load_balancer['id']][0]['floating_ip_address']

        # Currently the ovs-agent is not enforcing security groups on the
        # vip port - see https://bugs.launchpad.net/neutron/+bug/1163569
        # However the linuxbridge-agent does, and it is necessary to add a
        # security group with a rule that allows tcp port 80 to the vip port.
        self.ports_client_admin.update_port(
            self.load_balancer['vip']['port_id'],
            security_groups=[self.security_group['id']])
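
Expanded, the single-call graph payload assembled by
_create_load_balancer_tree nests the whole listener/pool/member tree under
one request (shape taken from the kwargs built above):

    # create_lb = {
    #     'project_id': project_id,
    #     'vip': {'subnet_id': subnet_id},
    #     'listeners': [{'project_id': project_id,
    #                    'protocol': 'HTTP',
    #                    'protocol_port': 80,
    #                    'default_pool': {'project_id': project_id,
    #                                     'lb_algorithm': 'ROUND_ROBIN',
    #                                     'protocol': 'HTTP',
    #                                     'members': create_members}}]}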

    def _create_members_kwargs(self, subnet_id=None):
        """Create kwargs for one or more members.

        If there is only one server, create both members with the same IP
        but with different ports to listen on.
        """
        create_member_kwargs = []
        for server_id, ip in six.iteritems(self.server_fixed_ips):
            create_member_kwargs.append({'ip_address': ip,
                                         'protocol_port': 80,
                                         'weight': 50,
                                         'subnet_id': subnet_id})
        return create_member_kwargs
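
For two backend servers, the loop above yields one member definition per
fixed IP (addresses illustrative):

    # [{'ip_address': '10.0.0.3', 'protocol_port': 80, 'weight': 50,
    #   'subnet_id': subnet_id},
    #  {'ip_address': '10.0.0.4', 'protocol_port': 80, 'weight': 50,
    #   'subnet_id': subnet_id}]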

octavia/tests/tempest/v1/scenario/test_lb_quota.py (new file, 34 lines)
@ -0,0 +1,34 @@
# Copyright 2017 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib import decorators
from tempest import test

from octavia.tests.tempest.v1.scenario import base


class TestLoadBalancerQuota(base.BaseTestCase):
    """This test attempts to exceed a set load balancer quota.

    The following is the scenario outline:
    1. Set the load balancer quota to one.
    2. Create two load balancers, expecting the second create to fail
       with a quota exceeded code.
    """

    @test.services('compute', 'network')
    @decorators.skip_because(bug="1656110")
    def test_load_balancer_quota(self):
        self._set_quotas(project_id=None, load_balancer=1)
        self._create_load_balancer_over_quota()

@ -0,0 +1,48 @@
# Copyright 2016 Rackspace US Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from tempest import config
from tempest import test

from octavia.tests.tempest.v1.scenario import base

config = config.CONF

LOG = logging.getLogger(__name__)


class TestLoadBalancerTreeMinimal(base.BaseTestCase):

    @test.services('compute', 'network')
    def test_load_balancer_tree_minimal(self):
        """This test checks basic load balancing.

        The following is the scenario outline:
        1. Create an instance.
        2. SSH to the instance and start two servers.
        3. Create a load balancer graph with two members and with
           ROUND_ROBIN algorithm.
        4. Associate the VIP with a floating ip.
        5. Send NUM requests to the floating ip and check that they are
           shared between the two servers.
        """
        self._create_server('server1')
        self._start_backend_httpd_processes('server1')
        self._create_server('server2')
        self._start_backend_httpd_processes('server2')
        self._create_load_balancer_tree()
        self._check_members_balanced(['server1_0', 'server2_0'])

@ -120,7 +120,7 @@ class TestLoadBalancerFlows(base.TestCase):
        self.assertIn(constants.LOADBALANCER, lb_flow.requires)

        self.assertEqual(0, len(lb_flow.provides))
        self.assertEqual(4, len(lb_flow.requires))
        self.assertEqual(5, len(lb_flow.requires))

    def test_get_new_LB_networking_subflow(self, mock_get_net_driver):

@ -50,7 +50,7 @@ class TestPoolFlows(base.TestCase):
        self.assertIn(constants.POOL, pool_flow.requires)

        self.assertEqual(3, len(pool_flow.requires))
        self.assertEqual(0, len(pool_flow.provides))
        self.assertEqual(1, len(pool_flow.provides))

    def test_get_delete_pool_flow_internal(self):

@ -59,7 +59,7 @@ class TestPoolFlows(base.TestCase):
        self.assertIsInstance(pool_flow, flow.Flow)
        self.assertIn('test', pool_flow.requires)

        self.assertEqual(1, len(pool_flow.requires))
        self.assertEqual(2, len(pool_flow.requires))
        self.assertEqual(0, len(pool_flow.provides))

    def test_get_update_pool_flow(self):
@ -0,0 +1,322 @@
# Copyright 2017 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock
from oslo_utils import uuidutils
from taskflow.types import failure

from octavia.common import data_models
from octavia.common import exceptions
from octavia.controller.worker.tasks import database_tasks
import octavia.tests.unit.base as base


class TestDatabaseTasksQuota(base.TestCase):

    def setUp(self):

        self._tf_failure_mock = mock.Mock(spec=failure.Failure)
        self.zero_pool_child_count = {'HM': 0, 'member': 0}

        super(TestDatabaseTasksQuota, self).setUp()

    @mock.patch('octavia.db.api.get_session', return_value='TEST')
    @mock.patch('octavia.db.repositories.Repositories.decrement_quota')
    @mock.patch('octavia.db.repositories.Repositories.check_quota_met')
    def _test_decrement_quota(self,
                              task,
                              data_model,
                              mock_check_quota_met,
                              mock_decrement_quota,
                              mock_get_session):

        project_id = uuidutils.generate_uuid()
        test_object = mock.MagicMock()
        test_object.project_id = project_id

        # execute without exception
        mock_decrement_quota.reset_mock()
        with mock.patch('octavia.db.api.'
                        'get_session') as mock_get_session_local:
            mock_session = mock.MagicMock()
            mock_get_session_local.return_value = mock_session

            if data_model == data_models.Pool:
                task.execute(test_object, self.zero_pool_child_count)
            else:
                task.execute(test_object)

            mock_decrement_quota.assert_called_once_with(
                mock_session, data_model, project_id)

            mock_session.commit.assert_called_once_with()

        # execute with exception
        mock_decrement_quota.reset_mock()
        with mock.patch('octavia.db.api.'
                        'get_session') as mock_get_session_local:
            mock_session = mock.MagicMock()
            mock_get_session_local.return_value = mock_session

            mock_decrement_quota.side_effect = (
                exceptions.OctaviaException('fail'))
            if data_model == data_models.Pool:
                self.assertRaises(exceptions.OctaviaException,
                                  task.execute,
                                  test_object,
                                  self.zero_pool_child_count)
            else:
                self.assertRaises(exceptions.OctaviaException,
                                  task.execute,
                                  test_object)

            mock_decrement_quota.assert_called_once_with(
                mock_session, data_model, project_id)

            mock_session.rollback.assert_called_once_with()

        # revert with instance of failure
        mock_get_session.reset_mock()
        mock_check_quota_met.reset_mock()
        if data_model == data_models.Pool:
            task.revert(test_object,
                        self.zero_pool_child_count,
                        self._tf_failure_mock)
        else:
            task.revert(test_object, self._tf_failure_mock)
        self.assertFalse(mock_get_session.called)
        self.assertFalse(mock_check_quota_met.called)

        # revert
        mock_check_quota_met.reset_mock()
        with mock.patch('octavia.db.api.'
                        'get_session') as mock_get_session_local:
            mock_session = mock.MagicMock()
            mock_lock_session = mock.MagicMock()
            mock_get_session_local.side_effect = [mock_session,
                                                  mock_lock_session]

            if data_model == data_models.Pool:
                task.revert(test_object, self.zero_pool_child_count, None)
            else:
                task.revert(test_object, None)

            mock_check_quota_met.assert_called_once_with(
                mock_session, mock_lock_session, data_model,
                project_id)

            mock_lock_session.commit.assert_called_once_with()

        # revert with rollback
        with mock.patch('octavia.db.api.'
                        'get_session') as mock_get_session_local:
            mock_session = mock.MagicMock()
            mock_lock_session = mock.MagicMock()
            mock_get_session_local.side_effect = [mock_session,
                                                  mock_lock_session]
            mock_check_quota_met.side_effect = (
                exceptions.OctaviaException('fail'))

            if data_model == data_models.Pool:
                task.revert(test_object, self.zero_pool_child_count, None)
            else:
                task.revert(test_object, None)

            mock_lock_session.rollback.assert_called_once_with()

        # revert with db exception
        mock_check_quota_met.reset_mock()
        with mock.patch('octavia.db.api.'
                        'get_session') as mock_get_session_local:
            mock_get_session_local.side_effect = Exception('fail')

            if data_model == data_models.Pool:
                task.revert(test_object, self.zero_pool_child_count, None)
            else:
                task.revert(test_object, None)

            self.assertFalse(mock_check_quota_met.called)
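
A condensed sketch of the execute/revert contract that _test_decrement_quota
pins down (inferred from the mocks above; the committed tasks live in
octavia.controller.worker.tasks.database_tasks and may differ in detail):

    # Sketch only: decrement on execute, re-check the quota on revert.
    from taskflow.types import failure

    from octavia.db import api as db_api
    from octavia.db import repositories as repo


    class DecrementQuotaSketch(object):
        data_model = None  # e.g. data_models.Listener in the real tasks

        def __init__(self):
            self.repos = repo.Repositories()

        def execute(self, obj):
            session = db_api.get_session()
            try:
                self.repos.decrement_quota(session, self.data_model,
                                           obj.project_id)
                session.commit()
            except Exception:
                session.rollback()
                raise

        def revert(self, obj, result, *args, **kwargs):
            if isinstance(result, failure.Failure):
                return  # execute never decremented anything
            try:
                session = db_api.get_session()
                lock_session = db_api.get_session()
                try:
                    self.repos.check_quota_met(session, lock_session,
                                               self.data_model,
                                               obj.project_id)
                    lock_session.commit()
                except Exception:
                    lock_session.rollback()
            except Exception:
                pass  # a failed revert must not raise
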
    def test_decrement_health_monitor_quota(self):
        task = database_tasks.DecrementHealthMonitorQuota()
        data_model = data_models.HealthMonitor
        self._test_decrement_quota(task, data_model)

    def test_decrement_listener_quota(self):
        task = database_tasks.DecrementListenerQuota()
        data_model = data_models.Listener
        self._test_decrement_quota(task, data_model)

    def test_decrement_loadbalancer_quota(self):
        task = database_tasks.DecrementLoadBalancerQuota()
        data_model = data_models.LoadBalancer
        self._test_decrement_quota(task, data_model)

    def test_decrement_pool_quota(self):
        task = database_tasks.DecrementPoolQuota()
        data_model = data_models.Pool
        self._test_decrement_quota(task, data_model)

    def test_decrement_member_quota(self):
        task = database_tasks.DecrementMemberQuota()
        data_model = data_models.Member
        self._test_decrement_quota(task, data_model)

    @mock.patch('octavia.db.repositories.Repositories.decrement_quota')
    @mock.patch('octavia.db.repositories.Repositories.check_quota_met')
    def test_decrement_pool_quota_pool_children(self,
                                                mock_check_quota_met,
                                                mock_decrement_quota):
        pool_child_count = {'HM': 1, 'member': 2}
        project_id = uuidutils.generate_uuid()
        test_object = mock.MagicMock()
        test_object.project_id = project_id
        task = database_tasks.DecrementPoolQuota()
        mock_session = mock.MagicMock()

        with mock.patch('octavia.db.api.'
                        'get_session') as mock_get_session_local:
            mock_get_session_local.return_value = mock_session

            task.execute(test_object, pool_child_count)

            calls = [mock.call(mock_session, data_models.Pool, project_id),
                     mock.call(mock_session, data_models.HealthMonitor,
                               project_id),
                     mock.call(mock_session, data_models.Member, project_id,
                               quantity=2)]

            mock_decrement_quota.assert_has_calls(calls)

            mock_session.commit.assert_called_once_with()

        # revert
        mock_session.reset_mock()
        with mock.patch('octavia.db.api.'
                        'get_session') as mock_get_session_local:
            mock_lock_session = mock.MagicMock()
            mock_get_session_local.side_effect = [mock_session,
                                                  mock_lock_session,
                                                  mock_lock_session,
                                                  mock_lock_session,
                                                  mock_lock_session]

            task.revert(test_object, pool_child_count, None)

            calls = [mock.call(mock_session, mock_lock_session,
                               data_models.Pool, project_id),
                     mock.call(mock_session, mock_lock_session,
                               data_models.HealthMonitor, project_id),
                     mock.call(mock_session, mock_lock_session,
                               data_models.Member, project_id),
                     mock.call(mock_session, mock_lock_session,
                               data_models.Member, project_id)]

            mock_check_quota_met.assert_has_calls(calls)

            self.assertEqual(4, mock_lock_session.commit.call_count)

        # revert with health monitor quota exception
        mock_session.reset_mock()
        mock_check_quota_met.side_effect = [None, Exception('fail'), None,
                                            None]
        with mock.patch('octavia.db.api.'
                        'get_session') as mock_get_session_local:
            mock_lock_session = mock.MagicMock()
            mock_get_session_local.side_effect = [mock_session,
                                                  mock_lock_session,
                                                  mock_lock_session,
                                                  mock_lock_session,
                                                  mock_lock_session]

            task.revert(test_object, pool_child_count, None)

            calls = [mock.call(mock_session, mock_lock_session,
                               data_models.Pool, project_id),
                     mock.call(mock_session, mock_lock_session,
                               data_models.HealthMonitor, project_id),
                     mock.call(mock_session, mock_lock_session,
                               data_models.Member, project_id),
                     mock.call(mock_session, mock_lock_session,
                               data_models.Member, project_id)]

            mock_check_quota_met.assert_has_calls(calls)

            self.assertEqual(3, mock_lock_session.commit.call_count)
            self.assertEqual(1, mock_lock_session.rollback.call_count)

        # revert with member quota exception
        mock_session.reset_mock()
        mock_check_quota_met.side_effect = [None, None, None,
                                            Exception('fail')]
        with mock.patch('octavia.db.api.'
                        'get_session') as mock_get_session_local:
            mock_lock_session = mock.MagicMock()
            mock_get_session_local.side_effect = [mock_session,
                                                  mock_lock_session,
                                                  mock_lock_session,
                                                  mock_lock_session,
                                                  mock_lock_session]

            task.revert(test_object, pool_child_count, None)

            calls = [mock.call(mock_session, mock_lock_session,
                               data_models.Pool, project_id),
                     mock.call(mock_session, mock_lock_session,
                               data_models.HealthMonitor, project_id),
                     mock.call(mock_session, mock_lock_session,
                               data_models.Member, project_id),
                     mock.call(mock_session, mock_lock_session,
                               data_models.Member, project_id)]

            mock_check_quota_met.assert_has_calls(calls)

            self.assertEqual(3, mock_lock_session.commit.call_count)
            self.assertEqual(1, mock_lock_session.rollback.call_count)

    def test_count_pool_children_for_quota(self):
        project_id = uuidutils.generate_uuid()
        member1 = data_models.Member(id=1, project_id=project_id)
        member2 = data_models.Member(id=2, project_id=project_id)
        healthmon = data_models.HealthMonitor(id=1, project_id=project_id)
        pool_no_children = data_models.Pool(id=1, project_id=project_id)
        pool_1_mem = data_models.Pool(id=1, project_id=project_id,
                                      members=[member1])
        pool_hm = data_models.Pool(id=1, project_id=project_id,
                                   health_monitor=healthmon)
        pool_hm_2_mem = data_models.Pool(id=1, project_id=project_id,
                                         health_monitor=healthmon,
                                         members=[member1, member2])
        task = database_tasks.CountPoolChildrenForQuota()

        # Test pool with no children
        result = task.execute(pool_no_children)

        self.assertEqual({'HM': 0, 'member': 0}, result)

        # Test pool with one member
        result = task.execute(pool_1_mem)

        self.assertEqual({'HM': 0, 'member': 1}, result)

        # Test pool with health monitor and no members
        result = task.execute(pool_hm)

        self.assertEqual({'HM': 1, 'member': 0}, result)

        # Test pool with health monitor and two members
        result = task.execute(pool_hm_2_mem)

        self.assertEqual({'HM': 1, 'member': 2}, result)
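
The counting logic those last assertions pin down reduces to a few lines;
a sketch consistent with the expected results (not the committed task
body):

    # One unit for an attached health monitor, one per member.
    def count_pool_children(pool):
        return {'HM': 1 if pool.health_monitor else 0,
                'member': len(pool.members) if pool.members else 0}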

@ -0,0 +1,3 @@
---
features:
  - Adds quota support to the Octavia API.