Converted fully over to what the internet assures me is the American spelling of 'canceled' -- this only affects internal variables. Also cleaned up the meticulous timer accounting in the hub tests so they stop failing after other tests are run. Also added a new wait parameter to the abort method, since waiting for the hub seemed to be the dominant mode of operation.

Ryan Williams
2010-05-05 21:44:35 -07:00
parent 1a6f4e491f
commit f8ced5cb9b
9 changed files with 76 additions and 48 deletions

View File

@@ -23,8 +23,9 @@ def sleep(seconds=0):
occasionally; otherwise nothing else will run.
"""
hub = hubs.get_hub()
assert hub.greenlet is not greenlet.getcurrent(), 'do not call blocking functions from the mainloop'
timer = hub.schedule_call_global(seconds, greenlet.getcurrent().switch)
current = greenlet.getcurrent()
assert hub.greenlet is not current, 'do not call blocking functions from the mainloop'
timer = hub.schedule_call_global(seconds, current.switch)
try:
hub.switch()
finally:

View File

@@ -63,7 +63,7 @@ class BaseHub(object):
self.next_timers = []
self.lclass = FdListener
self.debug_exceptions = True
self.timers_cancelled = 0
self.timers_canceled = 0
def add(self, evtype, fileno, cb):
""" Signals an intent to or write a particular file descriptor.
@@ -183,18 +183,30 @@ class BaseHub(object):
else:
self.wait(0)
else:
self.canceled_timers = 0
del self.timers[:]
del self.next_timers[:]
finally:
self.running = False
self.stopping = False
def abort(self):
"""Stop the runloop. If run is executing, it will exit after completing
the next runloop iteration.
def abort(self, wait=False):
"""Stop the runloop. If run is executing, it will exit after
completing the next runloop iteration.
Set *wait* to True to cause abort to switch to the hub immediately and
wait until it's finished processing. Waiting for the hub will only
work from the main greenthread; all other greenthreads will become
unreachable.
"""
if self.running:
self.stopping = True
if wait:
# schedule an immediate timer just so the hub doesn't sleep
self.schedule_call_global(0, lambda: None)
# switch to it; when done the hub will switch back to its parent,
# the main greenlet
self.switch()
def squelch_generic_exception(self, exc_info):
if self.debug_exceptions:
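The new wait flag is exercised by the check_hub() change further down in this commit; a minimal usage sketch (outside the diff, called from the main greenthread as the docstring requires) looks roughly like:

    # sketch only, not part of the diff: abort the hub and wait for it to finish
    from eventlet import hubs

    hub = hubs.get_hub()
    if not getattr(hub, 'uses_twisted_reactor', None):
        hub.abort(True)      # schedules a no-op timer, switches to the hub,
                             # and returns once the runloop has exited
        assert not hub.running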
@@ -215,10 +227,10 @@ class BaseHub(object):
pass
def timer_canceled(self, timer):
self.timers_cancelled += 1
self.timers_canceled += 1
len_timers = len(self.timers)
if len_timers > 1000 and len_timers/2 <= self.timers_cancelled:
self.timers_cancelled = 0
if len_timers > 1000 and len_timers/2 <= self.timers_canceled:
self.timers_canceled = 0
self.timers = [t for t in self.timers if not t[1].called]
heapq.heapify(self.timers)
self.timer_finished(timer)
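In other words, a canceled timer just gets its called flag set and stays in the heap; the heap is only rebuilt once it holds more than 1000 entries and at least half of them are dead. A standalone sketch of that heuristic (names here are illustrative, assuming timers is a heapq-ordered list of (scheduled_time, timer) tuples):

    import heapq

    def maybe_compact(timers, timers_canceled):
        # mirrors the purge condition in timer_canceled above
        if len(timers) > 1000 and len(timers) / 2 <= timers_canceled:
            timers = [t for t in timers if not t[1].called]
            heapq.heapify(timers)
            timers_canceled = 0
        return timers, timers_canceled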
@@ -227,6 +239,9 @@ class BaseHub(object):
heappush = heapq.heappush
t = self.timers
for item in self.next_timers:
if item[1].called:
self.timers_canceled -= 1
else:
heappush(t, item)
del self.next_timers[:]
@@ -244,7 +259,7 @@ class BaseHub(object):
def schedule_call_global(self, seconds, cb, *args, **kw):
"""Schedule a callable to be called after 'seconds' seconds have
elapsed. The timer will NOT be cancelled if the current greenlet has
elapsed. The timer will NOT be canceled if the current greenlet has
exited before the timer fires.
seconds: The number of seconds to wait.
cb: The callable to call after the given time.
@@ -273,7 +288,7 @@ class BaseHub(object):
try:
try:
if timer.called:
self.timers_cancelled -= 1
self.timers_canceled -= 1
else:
timer()
except self.SYSTEM_EXCEPTIONS:

View File

@@ -6,7 +6,6 @@ useful for debugging leaking timers, to find out where the timer was set up. """
_g_debug = False
class Timer(object):
#__slots__ = ['seconds', 'tpl', 'called', 'cancelled', 'scheduled_time', 'greenlet', 'traceback', 'impltimer']
def __init__(self, seconds, cb, *args, **kw):
"""Create a timer.
seconds: The minimum number of seconds to wait before calling
@@ -64,7 +63,7 @@ class Timer(object):
def cancel(self):
"""Prevent this timer from being called. If the timer has already
been called or cancelled, has no effect.
been called or canceled, has no effect.
"""
if not self.called:
self.called = True

View File

@@ -52,7 +52,7 @@ class Timeout(BaseException):
def start(self):
"""Schedule the timeout. This is called on construction, so
it should not be called explicitly, unless the timer has been
cancelled."""
canceled."""
assert not self.pending, \
'%r is already started; to restart it, cancel it first' % self
if self.seconds is None: # "fake" timeout (never expires)
@@ -77,7 +77,7 @@ class Timeout(BaseException):
"""If the timeout is pending, cancel it. If not using
Timeouts in ``with`` statements, always call cancel() in a
``finally`` after the block of code that is getting timed out.
If not cancelled, the timeout will be raised later on, in some
If not canceled, the timeout will be raised later on, in some
unexpected section of the application."""
if self.timer is not None:
self.timer.cancel()

View File

@@ -129,6 +129,21 @@ class LimitedTestCase(unittest.TestCase):
print debug.format_hub_timers()
print debug.format_hub_listeners()
def assert_less_than(self, a,b,msg=None):
if msg:
self.assert_(a<b, msg)
else:
self.assert_(a<b, "%s not less than %s" % (a,b))
assertLessThan = assert_less_than
def assert_less_than_equal(self, a,b,msg=None):
if msg:
self.assert_(a<=b, msg)
else:
self.assert_(a<=b, "%s not less than or equal to %s" % (a,b))
assertLessThanEqual = assert_less_than_equal
def verify_hub_empty():
from eventlet import hubs
@@ -195,4 +210,3 @@ def get_database_auth():
except IOError:
pass
return retval

View File

@@ -20,9 +20,8 @@ def check_hub():
assert not dct, "hub.%s not empty: %s" % (nm, dct)
# Stop the runloop (unless it's twistedhub which does not support that)
if not getattr(hub, 'uses_twisted_reactor', None):
hub.abort()
api.sleep(0)
### ??? assert not hubs.get_hub().running
hub.abort(True)
assert not hub.running
class TestApi(TestCase):

View File

@@ -11,42 +11,48 @@ def noop():
class TestTimerCleanup(LimitedTestCase):
def test_cancel_accumulated(self):
hub = hubs.get_hub()
start_timers = len(hub.timers)
start_cancelled = hub.timers_cancelled
stimers = hub.get_timers_count()
scanceled = hub.timers_canceled
for i in xrange(2000):
t = hubs.get_hub().schedule_call_global(60, noop)
eventlet.sleep()
self.assert_(hub.timers_cancelled < len(hub.timers))
self.assert_less_than_equal(hub.timers_canceled - scanceled,
hub.get_timers_count() - stimers)
t.cancel()
self.assert_(hub.timers_cancelled < len(hub.timers))
# there should be fewer than 1000 new timers and cancelled
self.assert_(len(hub.timers) < start_timers + 1000)
self.assert_(hub.timers_cancelled < 1000)
self.assert_less_than_equal(hub.timers_canceled - scanceled,
hub.get_timers_count() - stimers)
# there should be fewer than 1000 new timers and canceled
self.assert_less_than_equal(hub.get_timers_count(), stimers + 1000)
self.assert_less_than_equal(hub.timers_canceled, 1000)
def test_cancel_proportion(self):
# if fewer than half the pending timers are cancelled, it should
# if fewer than half the pending timers are canceled, it should
# not clean them out
hub = hubs.get_hub()
uncancelled_timers = []
start_timers = len(hub.timers)
start_cancelled = hub.timers_cancelled
uncanceled_timers = []
stimers = hub.get_timers_count()
scanceled = hub.timers_canceled
for i in xrange(1000):
# 2/3rds of new timers are uncancelled
# 2/3rds of new timers are uncanceled
t = hubs.get_hub().schedule_call_global(60, noop)
t2 = hubs.get_hub().schedule_call_global(60, noop)
t3 = hubs.get_hub().schedule_call_global(60, noop)
eventlet.sleep()
self.assert_(hub.timers_cancelled < len(hub.timers))
self.assert_less_than_equal(hub.timers_canceled - scanceled,
hub.get_timers_count() - stimers)
t.cancel()
self.assert_(hub.timers_cancelled < len(hub.timers))
uncancelled_timers.append(t2)
uncancelled_timers.append(t3)
# 3000 new timers, plus one new one from the sleeps
self.assertEqual(len(hub.timers), start_timers + 3001)
self.assertEqual(hub.timers_cancelled, start_cancelled + 1000)
for t in uncancelled_timers:
self.assert_less_than_equal(hub.timers_canceled - scanceled,
hub.get_timers_count() - stimers)
uncanceled_timers.append(t2)
uncanceled_timers.append(t3)
# 3000 new timers, plus a few extras
self.assert_less_than_equal(stimers + 3000,
hub.get_timers_count())
self.assertEqual(hub.timers_canceled, scanceled + 1000)
for t in uncanceled_timers:
t.cancel()
self.assert_(hub.timers_cancelled < len(hub.timers))
self.assert_less_than_equal(hub.timers_canceled - scanceled,
hub.get_timers_count() - stimers)
eventlet.sleep()

View File

@@ -193,9 +193,6 @@ class TestSaranwrap(LimitedTestCase):
prox.err_string('goodbye')
self.assert_server_exists(prox)
def assertLessThan(self, a, b):
self.assert_(a < b, "%s is not less than %s" % (a, b))
@skip_on_windows
@skip_with_pyevent
def test_status(self):

View File

@@ -182,9 +182,6 @@ class TestTpool(LimitedTestCase):
prox = tpool.Proxy(tpool_test)
self.assertRaises(RuntimeError, prox.raise_exception)
def assertLessThan(self, a, b):
self.assert_(a < b, "%s is not less than %s" % (a, b))
@skip_with_pyevent
def test_variable_and_keyword_arguments_with_function_calls(self):
import optparse