Fixed a few rare errors revealed by copious testing; moved long-duration tpool tests to their own testcase with a longer timeout.
@@ -11,7 +11,10 @@ def get_fileno(obj):
             raise TypeError("Expected int or long, got " + type(obj))
         return obj
     else:
-        return f()
+        rv = f()
+        if not isinstance(rv, (int, long)):
+            raise TypeError("Expected int or long, got " + type(rv))
+        return rv
 
 def select(read_list, write_list, error_list, timeout=None):
     hub = get_hub()
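The isinstance check added to get_fileno() rejects file-like objects whose fileno() returns something other than an int or long, so the bad value fails fast instead of being handed to the hub. A minimal sketch of the case it guards against (BadFile and its use are illustrative, not from the commit, and it assumes this hunk is eventlet/green/select.py, where these functions live):

    # Illustrative only -- BadFile is a made-up class, not part of this commit.
    from eventlet.green import select

    class BadFile(object):
        def fileno(self):
            return "not an fd"          # violates the fileno() contract

    try:
        select.select([BadFile()], [], [], 0)
    except TypeError:
        pass                            # get_fileno() now fails fast on the bogus return value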
@@ -56,7 +56,7 @@ class Hub(BaseHub):
         super(Hub, self).remove_descriptor(fileno)
         try:
             self.poll.unregister(fileno)
-        except KeyError:
+        except (KeyError, ValueError):
             pass
 
     def wait(self, seconds=None):
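Catching ValueError in addition to KeyError makes remove_descriptor() tolerant of descriptors that can no longer be resolved. KeyError is what poll.unregister() raises for an fd that was never registered; ValueError can surface when the descriptor itself is no longer valid, for instance a negative fileno or a file object that has already been closed. A sketch of the latter case (not taken from the commit, assumes a POSIX platform):

    # One way poll.unregister() can raise ValueError: the closed file's
    # fileno() fails before the fd lookup even happens.
    import select

    p = select.poll()
    f = open('/dev/null')
    p.register(f, select.POLLIN)
    f.close()
    try:
        p.unregister(f)                 # fileno() on a closed file raises ValueError
    except (KeyError, ValueError):
        pass                            # same handling the poll hub now uses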
@@ -26,8 +26,10 @@ one = 1
 two = 2
 three = 3
 
+def noop():
+    pass
+
 class TestTpool(LimitedTestCase):
-    TEST_TIMEOUT=3
     def setUp(self):
         tpool.setup()
         debug.hub_exceptions(True)
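Two things happen here: noop() is hoisted to module level so that the rewritten timeit benchmark further down can import it by name (timeit only sees what its setup string imports), and the explicit TEST_TIMEOUT on TestTpool is dropped now that the slow test lives in its own class. Assuming LimitedTestCase works the way eventlet's test helpers usually do, TEST_TIMEOUT is simply a per-class time budget applied as an eventlet timeout around each test, roughly:

    # Rough sketch of the assumed LimitedTestCase timeout mechanism; see
    # eventlet's tests/__init__.py for the real implementation.
    import unittest
    from eventlet.timeout import Timeout

    class LimitedTestCase(unittest.TestCase):
        TEST_TIMEOUT = 1                 # seconds; subclasses override (e.g. 60 for long tests)

        def setUp(self):
            self.timer = Timeout(self.TEST_TIMEOUT)

        def tearDown(self):
            self.timer.cancel()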
@@ -38,33 +40,6 @@ class TestTpool(LimitedTestCase):
         tpool.killall()
         debug.hub_exceptions(False)
 
-    @skip_with_pyevent
-    def test_a_buncha_stuff(self):
-        assert_ = self.assert_
-        class Dummy(object):
-            def foo(self,when,token=None):
-                assert_(token is not None)
-                time.sleep(random.random()/200.0)
-                return token
-
-        def sender_loop(loopnum):
-            obj = tpool.Proxy(Dummy())
-            count = 100
-            for n in xrange(count):
-                api.sleep(random.random()/200.0)
-                now = time.time()
-                token = loopnum * count + n
-                rv = obj.foo(now,token=token)
-                self.assertEquals(token, rv)
-                api.sleep(random.random()/200.0)
-
-        pile = eventlet.GreenPile(10)
-        for i in xrange(10):
-            pile.spawn(sender_loop,i)
-        results = list(pile)
-        self.assertEquals(len(results), 10)
-
-
     @skip_with_pyevent
     def test_wrap_tuple(self):
         my_tuple = (1, 2)
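The removed test_a_buncha_stuff reappears unchanged under the new TpoolLongTests class in the next hunk. For readers unfamiliar with GreenPile, the pattern the test relies on is: spawn N greenthreads into the pile, then iterate the pile to collect their return values, which is how it asserts that all ten sender loops completed. A standalone sketch (the worker body is illustrative only):

    # Minimal GreenPile usage sketch; worker() is a made-up stand-in.
    import eventlet

    def worker(n):
        return n * n                    # stand-in for real work

    pile = eventlet.GreenPile(10)       # backed by a pool of up to 10 greenthreads
    for i in xrange(10):
        pile.spawn(worker, i)
    results = list(pile)                # yields each worker's return value, in spawn order
    assert len(results) == 10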
@@ -189,35 +164,56 @@ class TestTpool(LimitedTestCase):
     def test_killall(self):
         tpool.killall()
         tpool.setup()
 
+
+class TpoolLongTests(LimitedTestCase):
+    TEST_TIMEOUT=60
+    @skip_with_pyevent
+    def test_a_buncha_stuff(self):
+        assert_ = self.assert_
+        class Dummy(object):
+            def foo(self,when,token=None):
+                assert_(token is not None)
+                time.sleep(random.random()/200.0)
+                return token
+
+        def sender_loop(loopnum):
+            obj = tpool.Proxy(Dummy())
+            count = 100
+            for n in xrange(count):
+                api.sleep(random.random()/200.0)
+                now = time.time()
+                token = loopnum * count + n
+                rv = obj.foo(now,token=token)
+                self.assertEquals(token, rv)
+                api.sleep(random.random()/200.0)
+
+        pile = eventlet.GreenPile(10)
+        for i in xrange(10):
+            pile.spawn(sender_loop,i)
+        results = list(pile)
+        self.assertEquals(len(results), 10)
+
     @skipped
     def test_benchmark(self):
         """ Benchmark computing the amount of overhead tpool adds to function calls."""
         iterations = 10000
-        def bench(f, *args, **kw):
-            for i in xrange(iterations):
-                f(*args, **kw)
-        def noop():
-            pass
+        import timeit
+        imports = """
+from tests.tpool_test import noop
+from eventlet.tpool import execute
+"""
+        t = timeit.Timer("noop()", imports)
+        results = t.repeat(repeat=3, number=iterations)
+        best_normal = min(results)
 
-        normal_results = []
-        tpool_results = []
-        for i in xrange(3):
-            start = time.time()
-            bench(noop)
-            end = time.time()
-            normal_results.append(end-start)
+        t = timeit.Timer("execute(noop)", imports)
+        results = t.repeat(repeat=3, number=iterations)
+        best_tpool = min(results)
 
-            start = time.time()
-            bench(tpool.execute, noop)
-            end = time.time()
-            tpool_results.append(end-start)
-
-        avg_normal = sum(normal_results)/len(normal_results)
-        avg_tpool = sum(tpool_results)/len(tpool_results)
-        tpool_overhead = (avg_tpool-avg_normal)/iterations
+        tpool_overhead = (best_tpool-best_normal)/iterations
         print "%s iterations\nTpool overhead is %s seconds per call. Normal: %s; Tpool: %s" % (
-            iterations, tpool_overhead, normal_results, tpool_results)
+            iterations, tpool_overhead, best_normal, best_tpool)
 
 
 if __name__ == '__main__':
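The benchmark now uses timeit instead of hand-rolled wall-clock loops: each measurement is repeated three times and the minimum is kept, which is the conventional way to suppress scheduling noise, and the per-call overhead is the difference between the best tpool run and the best plain run divided by the iteration count. The same pattern in isolation (the inline noop here stands in for the one imported from tests.tpool_test):

    # Measurement pattern used by the rewritten benchmark, shown standalone.
    import timeit

    iterations = 10000
    t = timeit.Timer("noop()", "def noop(): pass")
    best_normal = min(t.repeat(repeat=3, number=iterations))
    # best_tpool would be taken the same way, timing "execute(noop)";
    # overhead per call = (best_tpool - best_normal) / iterations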