adding documentation
@@ -24,6 +24,7 @@ Overview:
    overview_reaper
    overview_auth
    overview_replication
    rate_limiting

Development:
doc/source/rate_limiting.rst (new file)
@@ -0,0 +1,67 @@
=============
Rate Limiting
=============

Rate limiting in swift is implemented as pluggable middleware. Rate
limiting is performed on requests that result in database writes to the
account and container sqlite dbs. It uses memcached and is dependent on
the proxy servers having highly synchronized time. The rate limits are
limited by the accuracy of the proxy server clocks.
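As a rough illustration of the sleep-based approach (a minimal sketch only:
it uses a plain dict where the real middleware relies on memcached's atomic
incr(), and it is not the middleware's actual code)::

    import time

    CLOCK_ACCURACY = 1000   # proxy clocks assumed in sync to within 1/1000 s
    counters = {}           # stands in for memcached

    def sleep_time(key, max_rate):
        """Seconds this request should wait to stay under max_rate req/s."""
        now_m = int(round(time.time() * CLOCK_ACCURACY))
        time_per_request_m = int(round(CLOCK_ACCURACY / float(max_rate)))
        running_time_m = counters.get(key, 0)
        if running_time_m < now_m:
            running_time_m = now_m      # counter fell behind the clock
        # reserve the next slot; every request pushes the counter forward
        counters[key] = running_time_m + time_per_request_m
        return (running_time_m - now_m) / float(CLOCK_ACCURACY)

    # two back-to-back requests at 10 req/s: the second waits about 0.1 s
    print(sleep_time('acct', 10), sleep_time('acct', 10))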
--------------
Configuration
--------------

All configuration is optional. If no account or container limits are provided
there will be no rate limiting. The available configuration options are:

====================== ========= =============================================
Option                 Default   Description
---------------------- --------- ---------------------------------------------
clock_accuracy         1000      Represents how accurate the proxy servers'
                                 system clocks are with each other. 1000 means
                                 that all the proxies' clocks are accurate to
                                 each other within 1 millisecond. No
                                 ratelimit should be higher than the clock
                                 accuracy.
max_sleep_time_seconds 60        App will immediately return a 498 response
                                 if the necessary sleep time ever exceeds
                                 the given max_sleep_time_seconds.
account_ratelimit      0         If set, will limit all requests to
                                 /account_name and PUTs to
                                 /account_name/container_name. Number is in
                                 requests per second.
account_whitelist      ''        Comma separated list of account names that
                                 will not be rate limited.
account_blacklist      ''        Comma separated list of account names that
                                 will not be allowed. Returns a 497 response.
container_limit_size   ''        When set with container_limit_x = r:
                                 for containers of size x, limit requests per
                                 second to r. Will limit GET and HEAD
                                 requests to /account_name/container_name and
                                 PUTs and DELETEs to
                                 /account_name/container_name/object_name.
====================== ========= =============================================
The container rate limits are linearly interpolated from the values given. A
sample set of container rate limiting settings could be::

    container_limit_100 = 100
    container_limit_200 = 50
    container_limit_500 = 20

This would result in:

================ ============
Container Size   Rate Limit
---------------- ------------
0-99             No limiting
100              100
150              75
500              20
1000             20
================ ============
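As a hedged sketch of that interpolation (a standalone illustration using the
sample values above, not the middleware's internal code)::

    limits = [(100, 100), (200, 50), (500, 20)]   # (container size, max rate)

    def container_maxrate(size, limits=limits):
        """Linearly interpolate the rate limit for a container of this size."""
        if size < limits[0][0]:
            return None                    # below the smallest size: no limit
        if size >= limits[-1][0]:
            return limits[-1][1]           # at or past the largest size
        for (s1, r1), (s2, r2) in zip(limits, limits[1:]):
            if s1 <= size <= s2:
                # straight line between the two surrounding points
                return r1 + (r2 - r1) * (size - s1) / float(s2 - s1)

    for size in (50, 100, 150, 500, 1000):
        print(size, container_maxrate(size))   # matches the table above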
@@ -8,7 +8,7 @@
# key_file = /etc/swift/proxy.key

[pipeline:main]
pipeline = healthcheck cache auth proxy-server
pipeline = healthcheck cache ratelimit auth proxy-server

[app:proxy-server]
use = egg:swift#proxy
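The old pipeline line is replaced by the new one directly below it: ratelimit
is inserted after the cache middleware (whose memcache client it depends on)
and before auth and the proxy app, so over-limit requests are presumably
turned away before any further processing.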
@@ -28,12 +28,6 @@ use = egg:swift#proxy
# error_suppression_interval = 60
# How many errors can accumulate before a node is temporarily ignored.
# error_suppression_limit = 10
# How many ops per second to one container (as a float)
# rate_limit = 20000.0
# How many ops per second for account-level operations
# account_rate_limit = 200.0
# rate_limit_account_whitelist = acct1,acct2,etc
# rate_limit_account_blacklist = acct3,acct4,etc

[filter:auth]
use = egg:swift#auth
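This hunk removes the proxy server's old rate_limit / account_rate_limit
options (the six commented lines above, per the -28,12 +28,6 header); they are
superseded by the standalone ratelimit middleware configured below.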
@@ -60,21 +54,23 @@ use = egg:swift#memcache
[filter:ratelimit]
use = egg:swift#ratelimit
# clock_accuracy should represent how accurate the proxy servers' system clocks
# are with each other. 1000 means that all the proxies' clocks are accurate to
# each other within 1 millisecond. No ratelimit should be higher than the
# clock accuracy.
clock_accuracy = 1000
max_sleep_time_seconds = 60
# clock_accuracy = 1000
# max_sleep_time_seconds = 60

# account_ratelimit of 0 means disabled
# account_ratelimit = 0

account_ratelimit = 200
# these are comma separated lists of account names
account_whitelist = a,b
# account_blacklist =
# account_whitelist = a,b
# account_blacklist = c,d

# with container_limit_x = r
# for containers of size x limit requests per second to r. The container
# rate will be linearly interpolated from the values given. With the values
# below, a container of size 5 will get a rate of 75.
container_limit_0 = 100
container_limit_10 = 50
container_limit_50 = 20
# container_limit_0 = 100
# container_limit_10 = 50
# container_limit_50 = 20
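For example, with the sample values above a container of size 5 sits halfway
between container_limit_0 = 100 and container_limit_10 = 50, so its limit
interpolates to 100 + (50 - 100) * 5 / 10 = 75 requests per second, matching
the comment. (The old, uncommented settings and the new, commented-out
defaults appear side by side in this hunk.)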
@@ -25,6 +25,9 @@ class MaxSleepTimeHit(Exception):
class RateLimitMiddleware(object):
    """
    Rate limiting middleware

    Rate limits requests on both an Account and Container level. Limits are
    configurable.
    """

    def __init__(self, app, conf, logger=None):
@@ -70,6 +73,9 @@ class RateLimitMiddleware(object):
            self.container_limits.append((cur_size, cur_rate, line_func))

    def get_container_maxrate(self, container_size):
        """
        Returns the number of requests allowed per second for a given
        container size.
        """
        last_func = None
        if container_size:
            container_size = int(container_size)
@@ -82,11 +88,17 @@ class RateLimitMiddleware(object):
                return last_func(container_size)
        return None

    def get_ratelimitable_key_tuples(self, req_method,
                                     account_name, container_name, obj_name):
    def get_ratelimitable_key_tuples(self, req_method, account_name,
                                     container_name=None,
                                     obj_name=None):
        """
        Returns a list of (key, ratelimit) tuples, where each key is used in
        memcache. Keys should be checked in order.

        :param req_method: HTTP method
        :param account_name: account name from path
        :param container_name: container name from path
        :param obj_name: object name from path
        """
        keys = []
        if self.account_rate_limit and account_name and (
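As a hedged, self-contained sketch of the selection rules the documentation
above describes (the key strings and rates are illustrative, not necessarily
the exact format the middleware uses)::

    def key_tuples(method, account, container=None, obj=None,
                   account_rate=200.0, container_rate=75.0):
        """Return (memcache key, max rate) pairs to check, in order."""
        keys = []
        # account limit: all requests to /account, plus container PUTs
        if (container is None and obj is None) or \
                (container and obj is None and method == 'PUT'):
            keys.append(('ratelimit/%s' % account, account_rate))
        # container limit: GET/HEAD on the container listing,
        # PUT/DELETE on objects within the container
        if (container and obj is None and method in ('GET', 'HEAD')) or \
                (container and obj and method in ('PUT', 'DELETE')):
            keys.append(('ratelimit/%s/%s' % (account, container),
                         container_rate))
        return keys

    print(key_tuples('PUT', 'AUTH_test', 'photos', 'img.jpg'))
    # -> [('ratelimit/AUTH_test/photos', 75.0)]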
@@ -112,6 +124,14 @@ class RateLimitMiddleware(object):
        return keys

    def _get_sleep_time(self, key, max_rate):
        '''
        Returns the amount of time (a float in seconds) that the app
        should sleep. Raises a MaxSleepTimeHit exception if the maximum
        sleep time is exceeded.

        :param key: a memcache key
        :param max_rate: maximum rate allowed in requests per second
        '''
        now_m = int(round(time.time() * self.clock_accuracy))
        time_per_request_m = int(round(self.clock_accuracy / max_rate))
        running_time_m = self.memcache_client.incr(key,
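To make the units concrete: with clock_accuracy = 1000 and max_rate = 20,
time_per_request_m is 1000 / 20 = 50 'clock units' (50 ms). Each request
presumably advances the shared memcache counter by that amount (the incr()
arguments are cut off by the hunk), so if the counter is already, say, 120
units ahead of now_m the request sleeps 0.12 s, and once the required sleep
would exceed max_sleep_time_seconds the middleware returns a 498 instead.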
@@ -135,6 +155,13 @@ class RateLimitMiddleware(object):
        return float(need_to_sleep_m) / self.clock_accuracy

    def handle_rate_limit(self, req, account_name, container_name, obj_name):
        '''
        Performs rate limiting and account white/black listing. Sleeps
        if necessary.
        :param account_name: account name from path
        :param container_name: container name from path
        :param obj_name: object name from path
        '''
        if account_name in self.rate_limit_blacklist:
            self.logger.error('Returning 497 because of blacklisting')
            return Response(status='497 Blacklisted',
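Note the two distinct status codes: a blacklisted account is answered
immediately with 497 Blacklisted, while a request whose computed sleep time
would exceed max_sleep_time_seconds gets a 498, as described in the rate
limiting documentation above.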
@@ -142,10 +169,11 @@ class RateLimitMiddleware(object):
        if account_name in self.rate_limit_whitelist:
            return None

        for key, max_rate in self.get_ratelimitable_key_tuples(req.method,
                                                               account_name,
                                                               container_name,
                                                               obj_name):
        for key, max_rate in self.get_ratelimitable_key_tuples(
            req.method,
            account_name,
            container_name=container_name,
            obj_name=obj_name):
            try:
                need_to_sleep = self._get_sleep_time(key, max_rate)
                if need_to_sleep > 0:
@@ -160,6 +188,13 @@ class RateLimitMiddleware(object):
        return None

    def __call__(self, env, start_response):
        """
        WSGI entry point.
        Wraps env in a webob.Request object and passes it down.

        :param env: WSGI environment dictionary
        :param start_response: WSGI callable
        """
        req = Request(env)
        if self.memcache_client is None:
            self.memcache_client = cache_from_env(env)
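cache_from_env() presumably pulls the memcache client that the cache
middleware, earlier in the pipeline, placed in the WSGI environment; this is
consistent with the sample pipeline putting cache ahead of ratelimit.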
@@ -174,6 +209,9 @@ class RateLimitMiddleware(object):


def filter_factory(global_conf, **local_conf):
    """
    paste.deploy filter factory for creating ratelimit middleware.
    """
    conf = global_conf.copy()
    conf.update(local_conf)
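The rest of the factory is cut off by the hunk; the usual paste.deploy
pattern, sketched here under the assumption that the middleware is
constructed as RateLimitMiddleware(app, conf) (matching the __init__
signature above), is to return a closure that wraps the downstream app::

    class RateLimitMiddleware(object):      # stub standing in for the class
        def __init__(self, app, conf):      # defined in this module
            self.app, self.conf = app, conf

    def filter_factory(global_conf, **local_conf):
        conf = global_conf.copy()
        conf.update(local_conf)

        def limit_filter(app):
            return RateLimitMiddleware(app, conf)
        return limit_filter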
@@ -88,6 +88,7 @@ def delay_denial(func):
        return func(*a, **kw)
    return wrapped


def get_container_memcache_key(account, container):
    path = '/%s/%s' % (account, container)
    return 'container%s' % path
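So, for example, get_container_memcache_key('AUTH_test', 'photos') returns
'container/AUTH_test/photos'; judging by the comment further down about
setting the container size for ratelimiting, this is the key under which the
container's cached status, ACLs and size are stored.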
@@ -290,8 +291,8 @@ class Controller(object):
            cache_timeout = self.app.recheck_container_existence
        else:
            cache_timeout = self.app.recheck_container_existence * 0.1
        self.app.memcache.set(cache_key, {'status': result_code,
                                          'read_acl': read_acl,
                                          'write_acl': write_acl,
                                          'container_size': container_size},
                              timeout=cache_timeout)
@@ -430,6 +431,7 @@ class Controller(object):
        if req.method == 'GET' and source.status in (200, 206):
            res = Response(request=req, conditional_response=True)
            res.bytes_transferred = 0

            def file_iter():
                try:
                    while True:
@@ -877,13 +879,13 @@ class ContainerController(Controller):
                req.path_info, self.app.container_ring.replica_count)

        # set the memcache container size for ratelimiting if missing
        cache_key = get_container_memcache_key(self.account_name,
                                               self.container_name)
        cache_value = self.app.memcache.get(cache_key)
        if not isinstance(cache_value, dict):
            self.app.memcache.set(cache_key,
                {'status': resp.status_int,
                 'read_acl': resp.headers.get('x-container-read'),
                 'write_acl': resp.headers.get('x-container-write'),
                 'container_size': resp.headers.get('x-container-object-count')},
                timeout=self.app.recheck_container_existence)
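The cached value is a plain dict under the container's memcache key, for
example (illustrative values)::

    {'status': 204,
     'read_acl': None,
     'write_acl': None,
     'container_size': '42'}

with container_size taken from the X-Container-Object-Count header (note it
is cached as a string, which is why get_container_maxrate() above calls int()
on it). The ratelimit middleware can then presumably pick the container limit
without an extra HEAD to the container servers.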
@@ -969,9 +971,9 @@ class ContainerController(Controller):
                statuses.append(503)
                reasons.append('')
                bodies.append('')
        cache_key = get_container_memcache_key(self.account_name,
                                               self.container_name)
        self.app.memcache.delete(cache_key)
        return self.best_response(req, statuses, reasons, bodies,
                                  'Container PUT')

@@ -1023,7 +1025,7 @@ class ContainerController(Controller):
                statuses.append(503)
                reasons.append('')
                bodies.append('')
        cache_key = get_container_memcache_key(self.account_name,
                                               self.container_name)
        self.app.memcache.delete(cache_key)
        return self.best_response(req, statuses, reasons, bodies,
@@ -1079,7 +1081,7 @@ class ContainerController(Controller):
                statuses.append(503)
                reasons.append('')
                bodies.append('')
        cache_key = get_container_memcache_key(self.account_name,
                                               self.container_name)
        self.app.memcache.delete(cache_key)
        resp = self.best_response(req, statuses, reasons, bodies,
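These three similar hunks invalidate the cached container entry after writes
to the container (the 'Container PUT' path is the one explicitly labeled
above), presumably so that a stale container_size or ACL set is not reused
for rate limiting until the next request re-populates the cache.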
@@ -1413,6 +1415,7 @@ class Application(BaseApplication):
                        trans_time,
                        )))


def app_factory(global_conf, **local_conf):
    """paste.deploy app factory for creating WSGI proxy apps."""
    conf = global_conf.copy()