Merge "ratelimit: Allow multiple placements"

commit 6c1bc3949d
Zuul 2020-06-01 21:25:41 +00:00, committed by Gerrit Code Review
3 changed files with 73 additions and 50 deletions

etc/proxy-server.conf-sample

@@ -470,7 +470,10 @@ use = egg:swift#s3api
 # With either tempauth or your custom auth:
 # - Put s3api just before your auth filter(s) in the pipeline
 # With keystone:
-# - Put s3api and s3token before keystoneauth in the pipeline
+# - Put s3api and s3token before keystoneauth in the pipeline, but after
+#   auth_token
+# If you have ratelimit enabled for Swift requests, you may want to place a
+# second copy after auth to also ratelimit S3 requests.
 #
 # Swift has no concept of the S3's resource owner; the resources
 # (i.e. containers and objects) created via the Swift API have no owner
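For context, an illustrative pipeline with two ratelimit placements (the surrounding filter names are an example, not part of this commit): S3 requests pass the first copy untouched because their paths are not yet Swift /v1 paths, so the second copy, placed after s3api and auth, can rate-limit them, while the new swift.ratelimit.handled flag keeps Swift API requests from being limited twice.

[pipeline:main]
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache listing_formats ratelimit s3api tempauth ratelimit copy slo dlo proxy-logging proxy-server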

swift/common/middleware/ratelimit.py

@@ -242,6 +242,10 @@ class RateLimitMiddleware(object):
         if not self.memcache_client:
             return None
+        if req.environ.get('swift.ratelimit.handled'):
+            return None
+        req.environ['swift.ratelimit.handled'] = True
+
         try:
             account_info = get_account_info(req.environ, self.app,
                                             swift_source='RL')
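The hunk above is the heart of the change: the first ratelimit instance that actually performs limiting marks the request, and any later instance in the pipeline sees the marker and steps aside. A standalone sketch of the pattern (a toy function, not Swift's middleware):

# Toy illustration of the swift.ratelimit.handled marker (hypothetical
# helper): the first caller claims the request, later callers return None
# and let it pass through untouched.
def maybe_ratelimit(environ):
    if environ.get('swift.ratelimit.handled'):
        return None                      # another copy already limited this
    environ['swift.ratelimit.handled'] = True
    return 'rate-limit accounting happens here'

env = {'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/v1/AUTH_test/c/o'}
print(maybe_ratelimit(env))  # first placement in the pipeline does the work
print(maybe_ratelimit(env))  # second placement is a no-op -> None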

test/unit/common/middleware/test_ratelimit.py

@@ -72,12 +72,20 @@ class FakeMemcache(object):
 class FakeApp(object):
+    skip_handled_check = False
+
     def __call__(self, env, start_response):
+        assert self.skip_handled_check or env.get('swift.ratelimit.handled')
         start_response('200 OK', [])
         return [b'Some Content']


+class FakeReq(object):
+    def __init__(self, method, env=None):
+        self.method = method
+        self.environ = env or {}
+
+
 def start_response(*args):
     pass
@@ -160,36 +168,29 @@ class TestRateLimit(unittest.TestCase):
             {'object_count': '5'}
         the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
         the_app.memcache_client = fake_memcache
-        req = lambda: None
-        req.environ = {'swift.cache': fake_memcache, 'PATH_INFO': '/v1/a/c/o'}
+        environ = {'swift.cache': fake_memcache, 'PATH_INFO': '/v1/a/c/o'}
         with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                         lambda *args, **kwargs: {}):
-            req.method = 'DELETE'
             self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
-                req, 'a', None, None)), 0)
-            req.method = 'PUT'
+                FakeReq('DELETE', environ), 'a', None, None)), 0)
             self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
-                req, 'a', 'c', None)), 1)
-            req.method = 'DELETE'
+                FakeReq('PUT', environ), 'a', 'c', None)), 1)
             self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
-                req, 'a', 'c', None)), 1)
-            req.method = 'GET'
+                FakeReq('DELETE', environ), 'a', 'c', None)), 1)
             self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
-                req, 'a', 'c', 'o')), 0)
-            req.method = 'PUT'
+                FakeReq('GET', environ), 'a', 'c', 'o')), 0)
             self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
-                req, 'a', 'c', 'o')), 1)
-            req.method = 'PUT'
+                FakeReq('PUT', environ), 'a', 'c', 'o')), 1)
             self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
-                req, 'a', 'c', None, global_ratelimit=10)), 2)
+                FakeReq('PUT', environ), 'a', 'c', None, global_ratelimit=10)), 2)
             self.assertEqual(the_app.get_ratelimitable_key_tuples(
-                req, 'a', 'c', None, global_ratelimit=10)[1],
+                FakeReq('PUT', environ), 'a', 'c', None, global_ratelimit=10)[1],
                 ('ratelimit/global-write/a', 10))
-            req.method = 'PUT'
             self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
-                req, 'a', 'c', None, global_ratelimit='notafloat')), 1)
+                FakeReq('PUT', environ), 'a', 'c', None,
+                global_ratelimit='notafloat')), 1)

     def test_memcached_container_info_dict(self):
         mdict = headers_to_container_info({'x-container-object-count': '45'})
@@ -204,9 +205,8 @@ class TestRateLimit(unittest.TestCase):
             {'container_size': 5}
         the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
         the_app.memcache_client = fake_memcache
-        req = lambda: None
-        req.method = 'PUT'
-        req.environ = {'PATH_INFO': '/v1/a/c/o', 'swift.cache': fake_memcache}
+        req = FakeReq('PUT', {
+            'PATH_INFO': '/v1/a/c/o', 'swift.cache': fake_memcache})
         with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                         lambda *args, **kwargs: {}):
             tuples = the_app.get_ratelimitable_key_tuples(req, 'a', 'c', 'o')
@@ -227,8 +227,8 @@ class TestRateLimit(unittest.TestCase):
             req = Request.blank('/v1/a%s/c' % meth)
             req.method = meth
             req.environ['swift.cache'] = FakeMemcache()
-            make_app_call = lambda: self.test_ratelimit(req.environ,
-                                                        start_response)
+            make_app_call = lambda: self.test_ratelimit(
+                req.environ.copy(), start_response)
             begin = time.time()
             self._run(make_app_call, num_calls, current_rate,
                       check_time=bool(exp_time))
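The switch to req.environ.copy() in this hunk and the ones that follow is needed because the middleware now mutates the environ: after one call, the shared dict carries swift.ratelimit.handled, and replaying it would make every later simulated request look already handled. A small sketch of the effect (toy code standing in for the marker check, not the test suite itself):

# Reusing one environ dict vs. copying it per simulated request.
def fake_ratelimit(environ):
    if environ.get('swift.ratelimit.handled'):
        return 'passed through'
    environ['swift.ratelimit.handled'] = True
    return 'rate limited'

base_env = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c'}

shared = dict(base_env)
print(fake_ratelimit(shared))          # 'rate limited'
print(fake_ratelimit(shared))          # 'passed through' -- stale marker

print(fake_ratelimit(dict(base_env)))  # 'rate limited'
print(fake_ratelimit(dict(base_env)))  # 'rate limited' -- fresh copy each time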
@@ -244,7 +244,7 @@ class TestRateLimit(unittest.TestCase):
         req.method = 'PUT'
         req.environ['swift.cache'] = FakeMemcache()
         req.environ['swift.cache'].init_incr_return_neg = True
-        make_app_call = lambda: self.test_ratelimit(req.environ,
+        make_app_call = lambda: self.test_ratelimit(req.environ.copy(),
                                                     start_response)
         begin = time.time()
         with mock.patch('swift.common.middleware.ratelimit.get_account_info',
@@ -260,15 +260,15 @@ class TestRateLimit(unittest.TestCase):
                      'account_whitelist': 'a',
                      'account_blacklist': 'b'}
         self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
-        req = Request.blank('/')
         with mock.patch.object(self.test_ratelimit,
                                'memcache_client', FakeMemcache()):
             self.assertEqual(
-                self.test_ratelimit.handle_ratelimit(req, 'a', 'c', 'o'),
+                self.test_ratelimit.handle_ratelimit(
+                    Request.blank('/'), 'a', 'c', 'o'),
                 None)
             self.assertEqual(
                 self.test_ratelimit.handle_ratelimit(
-                    req, 'b', 'c', 'o').status_int,
+                    Request.blank('/'), 'b', 'c', 'o').status_int,
                 497)

     def test_ratelimit_whitelist_sysmeta(self):
@@ -331,7 +331,7 @@ class TestRateLimit(unittest.TestCase):
                 self.parent = parent

             def run(self):
-                self.result = self.parent.test_ratelimit(req.environ,
+                self.result = self.parent.test_ratelimit(req.environ.copy(),
                                                           start_response)

         def get_fake_ratelimit(*args, **kwargs):
@ -370,18 +370,17 @@ class TestRateLimit(unittest.TestCase):
# simulates 4 requests coming in at same time, then sleeping # simulates 4 requests coming in at same time, then sleeping
with mock.patch('swift.common.middleware.ratelimit.get_account_info', with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}): lambda *args, **kwargs: {}):
r = self.test_ratelimit(req.environ, start_response) r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1) mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response) r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1) mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response) r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down') self.assertEqual(r[0], b'Slow down')
mock_sleep(.1) mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response) r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down') self.assertEqual(r[0], b'Slow down')
mock_sleep(.1) mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response) r = self.test_ratelimit(req.environ.copy(), start_response)
print(repr(r))
self.assertEqual(r[0], b'Some Content') self.assertEqual(r[0], b'Some Content')
def test_ratelimit_max_rate_double_container(self): def test_ratelimit_max_rate_double_container(self):
@@ -404,17 +403,17 @@ class TestRateLimit(unittest.TestCase):
         # simulates 4 requests coming in at same time, then sleeping
         with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                         lambda *args, **kwargs: {}):
-            r = self.test_ratelimit(req.environ, start_response)
+            r = self.test_ratelimit(req.environ.copy(), start_response)
             mock_sleep(.1)
-            r = self.test_ratelimit(req.environ, start_response)
+            r = self.test_ratelimit(req.environ.copy(), start_response)
             mock_sleep(.1)
-            r = self.test_ratelimit(req.environ, start_response)
+            r = self.test_ratelimit(req.environ.copy(), start_response)
             self.assertEqual(r[0], b'Slow down')
             mock_sleep(.1)
-            r = self.test_ratelimit(req.environ, start_response)
+            r = self.test_ratelimit(req.environ.copy(), start_response)
             self.assertEqual(r[0], b'Slow down')
             mock_sleep(.1)
-            r = self.test_ratelimit(req.environ, start_response)
+            r = self.test_ratelimit(req.environ.copy(), start_response)
             self.assertEqual(r[0], b'Some Content')

     def test_ratelimit_max_rate_double_container_listing(self):
@@ -437,17 +436,17 @@ class TestRateLimit(unittest.TestCase):
                         lambda *args, **kwargs: {}):
             time_override = [0, 0, 0, 0, None]
             # simulates 4 requests coming in at same time, then sleeping
-            r = self.test_ratelimit(req.environ, start_response)
+            r = self.test_ratelimit(req.environ.copy(), start_response)
             mock_sleep(.1)
-            r = self.test_ratelimit(req.environ, start_response)
+            r = self.test_ratelimit(req.environ.copy(), start_response)
             mock_sleep(.1)
-            r = self.test_ratelimit(req.environ, start_response)
+            r = self.test_ratelimit(req.environ.copy(), start_response)
             self.assertEqual(r[0], b'Slow down')
             mock_sleep(.1)
-            r = self.test_ratelimit(req.environ, start_response)
+            r = self.test_ratelimit(req.environ.copy(), start_response)
             self.assertEqual(r[0], b'Slow down')
             mock_sleep(.1)
-            r = self.test_ratelimit(req.environ, start_response)
+            r = self.test_ratelimit(req.environ.copy(), start_response)
             self.assertEqual(r[0], b'Some Content')
             mc = self.test_ratelimit.memcache_client
             try:
@@ -466,9 +465,6 @@ class TestRateLimit(unittest.TestCase):
         the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
         the_app.memcache_client = fake_memcache
-        req = lambda: None
-        req.method = 'PUT'
-        req.environ = {}

         class rate_caller(threading.Thread):
@@ -478,8 +474,8 @@ class TestRateLimit(unittest.TestCase):
             def run(self):
                 for j in range(num_calls):
-                    self.result = the_app.handle_ratelimit(req, self.myname,
-                                                           'c', None)
+                    self.result = the_app.handle_ratelimit(
+                        FakeReq('PUT'), self.myname, 'c', None)

         with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                         lambda *args, **kwargs: {}):
@@ -541,7 +537,9 @@ class TestRateLimit(unittest.TestCase):
         current_rate = 13
         num_calls = 5
         conf_dict = {'account_ratelimit': current_rate}
-        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
+        fake_app = FakeApp()
+        fake_app.skip_handled_check = True
+        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(fake_app)
         req = Request.blank('/v1/a')
         req.environ['swift.cache'] = None
         make_app_call = lambda: self.test_ratelimit(req.environ,
@@ -551,6 +549,24 @@ class TestRateLimit(unittest.TestCase):
         time_took = time.time() - begin
         self.assertEqual(round(time_took, 1), 0)  # no memcache, no limiting

+    def test_already_handled(self):
+        current_rate = 13
+        num_calls = 5
+        conf_dict = {'container_listing_ratelimit_0': current_rate}
+        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
+        fake_cache = FakeMemcache()
+        fake_cache.set(
+            get_cache_key('a', 'c'),
+            {'object_count': 1})
+        req = Request.blank('/v1/a/c', environ={'swift.cache': fake_cache})
+        req.environ['swift.ratelimit.handled'] = True
+        make_app_call = lambda: self.test_ratelimit(req.environ,
+                                                    start_response)
+        begin = time.time()
+        self._run(make_app_call, num_calls, current_rate, check_time=False)
+        time_took = time.time() - begin
+        self.assertEqual(round(time_took, 1), 0)  # no memcache, no limiting
+
     def test_restarting_memcache(self):
         current_rate = 2
         num_calls = 5