PEP-8 fixes

Author: Sergey Shepelev
Date: 2014-07-18 15:21:26 +04:00
parent e9486899e0
commit 203e629212
127 changed files with 992 additions and 728 deletions
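The bulk of the 992 added lines follow a few mechanical patterns: long calls and string literals are re-wrapped so no physical line exceeds the flake8 length limit (E501), blank lines are added before top-level and class-level definitions (E302/E301), `##` block comments become `#`, and unused `as e` bindings are dropped from except clauses. A minimal before/after sketch of the wrapping pattern, using an illustrative message rather than code from the commit:

    import warnings

    # before: one over-long physical line
    warnings.warn("some.old.name is deprecated, use some.new.name instead", DeprecationWarning, stacklevel=2)

    # after: the call is opened on its own line and the arguments are
    # indented, so every physical line stays under the length limit
    warnings.warn(
        "some.old.name is deprecated, use some.new.name instead",
        DeprecationWarning, stacklevel=2)

The per-file diffs below apply the same treatment across eventlet, its hubs, the green modules, examples, and tests.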

View File

@@ -19,6 +19,7 @@ if len(sys.argv) >= 2:
l = []
def work(n):
l.append(n)

View File

@@ -97,7 +97,6 @@ if __name__ == "__main__":
parser.add_option('-t', '--tries', type='int', dest='tries',
default=TRIES)
opts, args = parser.parse_args()
BYTES = opts.bytes
SIZE = opts.size

View File

@@ -10,13 +10,15 @@ def cleanup():
iters = 10000
best = benchmarks.measure_best(5, iters,
best = benchmarks.measure_best(
5, iters,
'pass',
cleanup,
eventlet.sleep)
print("eventlet.sleep (main)", best[eventlet.sleep])
gt = eventlet.spawn(benchmarks.measure_best,5, iters,
gt = eventlet.spawn(
benchmarks.measure_best, 5, iters,
'pass',
cleanup,
eventlet.sleep)
@@ -40,7 +42,8 @@ def run_spawn_n_kw():
eventlet.spawn_n(dummy, i=1)
best = benchmarks.measure_best(5, iters,
best = benchmarks.measure_best(
5, iters,
'pass',
cleanup,
run_spawn_n,
@@ -71,7 +74,8 @@ def cleanup_pool():
pool.waitall()
best = benchmarks.measure_best(3, iters,
best = benchmarks.measure_best(
3, iters,
setup,
cleanup_pool,
run_pool_spawn,

View File

@@ -15,43 +15,52 @@ DATA_DIR = 'plot_data'
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
def write_result(filename, best):
fd = open(os.path.join(DATA_DIR, filename), 'w')
fd.write('YVALUE=%s' % best)
fd.close()
def cleanup():
eventlet.sleep(0.2)
iters = 10000
best = benchmarks.measure_best(5, iters,
best = benchmarks.measure_best(
5, iters,
'pass',
cleanup,
eventlet.sleep)
write_result('eventlet.sleep_main', best[eventlet.sleep])
gt = eventlet.spawn(benchmarks.measure_best,5, iters,
gt = eventlet.spawn(
benchmarks.measure_best, 5, iters,
'pass',
cleanup,
eventlet.sleep)
best = gt.wait()
write_result('eventlet.sleep_gt', best[eventlet.sleep])
def dummy(i=None):
return i
def run_spawn():
eventlet.spawn(dummy, 1)
def run_spawn_n():
eventlet.spawn_n(dummy, 1)
def run_spawn_n_kw():
eventlet.spawn_n(dummy, i=1)
best = benchmarks.measure_best(5, iters,
best = benchmarks.measure_best(
5, iters,
'pass',
cleanup,
run_spawn_n,
@@ -62,21 +71,27 @@ write_result('eventlet.spawn_n', best[run_spawn_n])
write_result('eventlet.spawn_n_kw', best[run_spawn_n_kw])
pool = None
def setup():
global pool
pool = eventlet.GreenPool(iters)
def run_pool_spawn():
pool.spawn(dummy, 1)
def run_pool_spawn_n():
pool.spawn_n(dummy, 1)
def cleanup_pool():
pool.waitall()
best = benchmarks.measure_best(3, iters,
best = benchmarks.measure_best(
3, iters,
setup,
cleanup_pool,
run_pool_spawn,

View File

@@ -18,25 +18,29 @@ __all__ = [
'ssl_listener', 'tcp_listener', 'trampoline',
'unspew', 'use_hub', 'with_timeout', 'timeout']
warnings.warn("eventlet.api is deprecated! Nearly everything in it has moved "
warnings.warn(
"eventlet.api is deprecated! Nearly everything in it has moved "
"to the eventlet module.", DeprecationWarning, stacklevel=2)
def get_hub(*a, **kw):
warnings.warn("eventlet.api.get_hub has moved to eventlet.hubs.get_hub",
warnings.warn(
"eventlet.api.get_hub has moved to eventlet.hubs.get_hub",
DeprecationWarning, stacklevel=2)
return hubs.get_hub(*a, **kw)
def get_default_hub(*a, **kw):
warnings.warn("eventlet.api.get_default_hub has moved to"
warnings.warn(
"eventlet.api.get_default_hub has moved to"
" eventlet.hubs.get_default_hub",
DeprecationWarning, stacklevel=2)
return hubs.get_default_hub(*a, **kw)
def use_hub(*a, **kw):
warnings.warn("eventlet.api.use_hub has moved to eventlet.hubs.use_hub",
warnings.warn(
"eventlet.api.use_hub has moved to eventlet.hubs.use_hub",
DeprecationWarning, stacklevel=2)
return hubs.use_hub(*a, **kw)
@@ -55,7 +59,8 @@ def tcp_listener(address, backlog=50):
socket object on which one should call ``accept()`` to accept a connection
on the newly bound socket.
"""
warnings.warn("""eventlet.api.tcp_listener is deprecated. Please use eventlet.listen instead.""",
warnings.warn(
"""eventlet.api.tcp_listener is deprecated. Please use eventlet.listen instead.""",
DeprecationWarning, stacklevel=2)
from eventlet import greenio, util
@@ -74,7 +79,8 @@ def ssl_listener(address, certificate, private_key):
Returns a socket object on which one should call ``accept()`` to
accept a connection on the newly bound socket.
"""
warnings.warn("""eventlet.api.ssl_listener is deprecated. Please use eventlet.wrap_ssl(eventlet.listen()) instead.""",
warnings.warn("""eventlet.api.ssl_listener is deprecated. Please use eventlet.wrap_ssl(eventlet.listen(
)) instead.""",
DeprecationWarning, stacklevel=2)
from eventlet import util
import socket
@@ -90,7 +96,8 @@ def connect_tcp(address, localaddr=None):
Create a TCP connection to address ``(host, port)`` and return the socket.
Optionally, bind to localaddr ``(host, port)`` first.
"""
warnings.warn("""eventlet.api.connect_tcp is deprecated. Please use eventlet.connect instead.""",
warnings.warn(
"""eventlet.api.connect_tcp is deprecated. Please use eventlet.connect instead.""",
DeprecationWarning, stacklevel=2)
from eventlet import greenio, util

View File

@@ -4,6 +4,7 @@ from eventlet import greenthread
__all__ = ['get_ident', 'local']
def get_ident():
""" Returns ``id()`` of current greenlet. Useful for debugging."""
return id(greenthread.getcurrent())
@@ -13,6 +14,7 @@ def get_ident():
# arguments in a local variable without calling __init__ directly
class _localbase(object):
__slots__ = '_local__args', '_local__greens'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
object.__setattr__(self, '_local__args', (args, kw))
@@ -21,6 +23,7 @@ class _localbase(object):
raise TypeError("Initialization arguments are not supported")
return self
def _patch(thrl):
greens = object.__getattribute__(thrl, '_local__greens')
# until we can store the localdict on greenlets themselves,

View File

@@ -26,28 +26,32 @@ def Event(*a, **kw):
def event(*a, **kw):
warnings.warn("The event class has been capitalized and moved! Please "
warnings.warn(
"The event class has been capitalized and moved! Please "
"construct event.Event objects instead.",
DeprecationWarning, stacklevel=2)
return _event.Event(*a, **kw)
def Semaphore(count):
warnings.warn("The Semaphore class has moved! Please "
warnings.warn(
"The Semaphore class has moved! Please "
"use semaphore.Semaphore instead.",
DeprecationWarning, stacklevel=2)
return semaphoremod.Semaphore(count)
def BoundedSemaphore(count):
warnings.warn("The BoundedSemaphore class has moved! Please "
warnings.warn(
"The BoundedSemaphore class has moved! Please "
"use semaphore.BoundedSemaphore instead.",
DeprecationWarning, stacklevel=2)
return semaphoremod.BoundedSemaphore(count)
def semaphore(count=0, limit=None):
warnings.warn("coros.semaphore is deprecated. Please use either "
warnings.warn(
"coros.semaphore is deprecated. Please use either "
"semaphore.Semaphore or semaphore.BoundedSemaphore instead.",
DeprecationWarning, stacklevel=2)
if limit is None:
@@ -74,6 +78,7 @@ class metaphore(object):
A decrementing
B decrementing
"""
def __init__(self):
self.counter = 0
self.event = _event.Event()
@@ -126,13 +131,15 @@ def execute(func, *args, **kw):
>>> evt.wait()
('foo', 1)
"""
warnings.warn("Coros.execute is deprecated. Please use eventlet.spawn "
warnings.warn(
"Coros.execute is deprecated. Please use eventlet.spawn "
"instead.", DeprecationWarning, stacklevel=2)
return greenthread.spawn(func, *args, **kw)
def CoroutinePool(*args, **kwargs):
warnings.warn("CoroutinePool is deprecated. Please use "
warnings.warn(
"CoroutinePool is deprecated. Please use "
"eventlet.GreenPool instead.", DeprecationWarning, stacklevel=2)
from eventlet.pool import Pool
return Pool(*args, **kwargs)
@@ -141,7 +148,8 @@ def CoroutinePool(*args, **kwargs):
class Queue(object):
def __init__(self):
warnings.warn("coros.Queue is deprecated. Please use "
warnings.warn(
"coros.Queue is deprecated. Please use "
"eventlet.queue.Queue instead.",
DeprecationWarning, stacklevel=2)
self.items = collections.deque()
@@ -215,7 +223,8 @@ class Queue(object):
class Channel(object):
def __init__(self, max_size=0):
warnings.warn("coros.Channel is deprecated. Please use "
warnings.warn(
"coros.Channel is deprecated. Please use "
"eventlet.queue.Queue(0) instead.",
DeprecationWarning, stacklevel=2)
self.max_size = max_size

View File

@@ -268,6 +268,7 @@ class TpooledConnectionPool(BaseConnectionPool):
"""A pool which gives out :class:`~eventlet.tpool.Proxy`-based database
connections.
"""
def create(self):
now = time.time()
return now, now, self.connect(
@@ -287,6 +288,7 @@ class TpooledConnectionPool(BaseConnectionPool):
class RawConnectionPool(BaseConnectionPool):
"""A pool which gives out plain database connections.
"""
def create(self):
now = time.time()
return now, now, self.connect(
@@ -316,38 +318,71 @@ class GenericConnectionWrapper(object):
# * def __getattr__(self, name): if name in (...): return getattr(self._base, name)
# * other?
def __enter__(self): return self._base.__enter__()
def __exit__(self, exc, value, tb): return self._base.__exit__(exc, value, tb)
def __repr__(self): return self._base.__repr__()
def affected_rows(self): return self._base.affected_rows()
def autocommit(self, *args, **kwargs): return self._base.autocommit(*args, **kwargs)
def begin(self): return self._base.begin()
def change_user(self, *args, **kwargs): return self._base.change_user(*args, **kwargs)
def character_set_name(self, *args, **kwargs): return self._base.character_set_name(*args, **kwargs)
def close(self, *args, **kwargs): return self._base.close(*args, **kwargs)
def commit(self, *args, **kwargs): return self._base.commit(*args, **kwargs)
def cursor(self, *args, **kwargs): return self._base.cursor(*args, **kwargs)
def dump_debug_info(self, *args, **kwargs): return self._base.dump_debug_info(*args, **kwargs)
def errno(self, *args, **kwargs): return self._base.errno(*args, **kwargs)
def error(self, *args, **kwargs): return self._base.error(*args, **kwargs)
def errorhandler(self, *args, **kwargs): return self._base.errorhandler(*args, **kwargs)
def insert_id(self, *args, **kwargs): return self._base.insert_id(*args, **kwargs)
def literal(self, *args, **kwargs): return self._base.literal(*args, **kwargs)
def set_character_set(self, *args, **kwargs): return self._base.set_character_set(*args, **kwargs)
def set_sql_mode(self, *args, **kwargs): return self._base.set_sql_mode(*args, **kwargs)
def show_warnings(self): return self._base.show_warnings()
def warning_count(self): return self._base.warning_count()
def ping(self, *args, **kwargs): return self._base.ping(*args, **kwargs)
def query(self, *args, **kwargs): return self._base.query(*args, **kwargs)
def rollback(self, *args, **kwargs): return self._base.rollback(*args, **kwargs)
def select_db(self, *args, **kwargs): return self._base.select_db(*args, **kwargs)
def set_server_option(self, *args, **kwargs): return self._base.set_server_option(*args, **kwargs)
def server_capabilities(self, *args, **kwargs): return self._base.server_capabilities(*args, **kwargs)
def shutdown(self, *args, **kwargs): return self._base.shutdown(*args, **kwargs)
def sqlstate(self, *args, **kwargs): return self._base.sqlstate(*args, **kwargs)
def stat(self, *args, **kwargs): return self._base.stat(*args, **kwargs)
def store_result(self, *args, **kwargs): return self._base.store_result(*args, **kwargs)
def string_literal(self, *args, **kwargs): return self._base.string_literal(*args, **kwargs)
def thread_id(self, *args, **kwargs): return self._base.thread_id(*args, **kwargs)
def use_result(self, *args, **kwargs): return self._base.use_result(*args, **kwargs)
@@ -357,6 +392,7 @@ class PooledConnectionWrapper(GenericConnectionWrapper):
- ``bool(conn)`` returns a reasonable value
- returns itself to the pool if it gets garbage collected
"""
def __init__(self, baseconn, pool):
super(PooledConnectionWrapper, self).__init__(baseconn)
self._pool = pool
@@ -393,6 +429,7 @@ class DatabaseConnector(object):
This is an object which will maintain a collection of database
connection pools on a per-host basis.
"""
def __init__(self, module, credentials,
conn_pool=None, *args, **kwargs):
"""constructor
@@ -408,7 +445,8 @@ class DatabaseConnector(object):
self._module = module
self._args = args
self._kwargs = kwargs
self._credentials = credentials # this is a map of hostname to username/password
# this is a map of hostname to username/password
self._credentials = credentials
self._databases = {}
def credentials_for(self, host):

View File

@@ -40,6 +40,7 @@ class Event(object):
"""
_result = None
_exc = None
def __init__(self):
self._waiters = set()
self.reset()

View File

@@ -5,7 +5,8 @@ from eventlet.green import urllib
from eventlet.green import select
test = None # bind prior to patcher.inject to silence pyflakes warning below
patcher.inject('CGIHTTPServer',
patcher.inject(
'CGIHTTPServer',
globals(),
('BaseHTTPServer', BaseHTTPServer),
('SimpleHTTPServer', SimpleHTTPServer),

View File

@@ -5,9 +5,11 @@ from eventlet import greenio
from eventlet.hubs import trampoline
import socket
class GreenConnection(greenio.GreenSocket):
""" Nonblocking wrapper for SSL.Connection objects.
"""
def __init__(self, ctx, sock=None):
if sock is not None:
fd = orig_SSL.Connection(ctx, sock)

View File

@@ -1,2 +1,5 @@
import rand, crypto, SSL, tsafe
import rand
import crypto
import SSL
import tsafe
from version import __version__

View File

@@ -6,18 +6,22 @@ __patched__ = ['LifoQueue', 'PriorityQueue', 'Queue']
# these classes exist to paper over the major operational difference between
# eventlet.queue.Queue and the stdlib equivalents
class Queue(queue.Queue):
def __init__(self, maxsize=0):
if maxsize == 0:
maxsize = None
super(Queue, self).__init__(maxsize)
class PriorityQueue(queue.PriorityQueue):
def __init__(self, maxsize=0):
if maxsize == 0:
maxsize = None
super(PriorityQueue, self).__init__(maxsize)
class LifoQueue(queue.LifoQueue):
def __init__(self, maxsize=0):
if maxsize == 0:

View File

@@ -2,7 +2,8 @@ from eventlet import patcher
from eventlet.green import BaseHTTPServer
from eventlet.green import urllib
patcher.inject('SimpleHTTPServer',
patcher.inject(
'SimpleHTTPServer',
globals(),
('BaseHTTPServer', BaseHTTPServer),
('urllib', urllib))

View File

@@ -18,6 +18,7 @@ from eventlet.greenio import _fileobject
try:
__original_fromfd__ = __socket.fromfd
def fromfd(*args):
return socket(__original_fromfd__(*args))
except AttributeError:
@@ -25,6 +26,7 @@ except AttributeError:
try:
__original_socketpair__ = __socket.socketpair
def socketpair(*args):
one, two = __original_socketpair__(*args)
return socket(one), socket(two)
@@ -32,7 +34,6 @@ except AttributeError:
pass
def _convert_to_sslerror(ex):
""" Transliterates SSL.SysCallErrors to socket.sslerrors"""
return sslerror((ex.args[0], ex.args[1]))
@@ -41,6 +42,7 @@ def _convert_to_sslerror(ex):
class GreenSSLObject(object):
""" Wrapper object around the SSLObjects returned by socket.ssl, which have a
slightly different interface from SSL.Connection objects. """
def __init__(self, green_ssl_obj):
""" Should only be called by a 'green' socket.ssl """
self.connection = green_ssl_obj

View File

@@ -2,7 +2,8 @@ from eventlet import patcher
from eventlet.green import asyncore
from eventlet.green import socket
patcher.inject('asynchat',
patcher.inject(
'asynchat',
globals(),
('asyncore', asyncore),
('socket', socket))

View File

@@ -3,7 +3,8 @@ from eventlet.green import select
from eventlet.green import socket
from eventlet.green import time
patcher.inject("asyncore",
patcher.inject(
"asyncore",
globals(),
('select', select),
('socket', socket),

View File

@@ -11,8 +11,12 @@ from eventlet.patcher import slurp_properties
__all__ = os_orig.__all__
__patched__ = ['fdopen', 'read', 'write', 'wait', 'waitpid', 'open']
slurp_properties(os_orig, globals(),
ignore=__patched__, srckeys=dir(os_orig))
slurp_properties(
os_orig,
globals(),
ignore=__patched__,
srckeys=dir(os_orig))
def fdopen(fd, *args, **kw):
"""fdopen(fd [, mode='r' [, bufsize]]) -> file_object
@@ -26,6 +30,8 @@ def fdopen(fd, *args, **kw):
raise OSError(*e.args)
__original_read__ = os_orig.read
def read(fd, n):
"""read(fd, buffersize) -> string
@@ -46,6 +52,8 @@ def read(fd, n):
return ''
__original_write__ = os_orig.write
def write(fd, st):
"""write(fd, string) -> byteswritten
@@ -62,6 +70,7 @@ def write(fd, st):
raise
hubs.trampoline(fd, write=True)
def wait():
"""wait() -> (pid, status)
@@ -69,6 +78,8 @@ def wait():
return waitpid(0, 0)
__original_waitpid__ = os_orig.waitpid
def waitpid(pid, options):
"""waitpid(...)
waitpid(pid, options) -> (pid, status)

View File

@@ -99,7 +99,6 @@ class Profile(profile_orig.Profile):
finally:
self.TallyTimings()
def trace_dispatch_return_extend_back(self, frame, t):
"""A hack function to override error checking in parent class. It
allows invalid returns (where frames weren't previously entered into
@@ -110,7 +109,7 @@ class Profile(profile_orig.Profile):
if isinstance(self.cur[-2], Profile.fake_frame):
return False
self.trace_dispatch_call(frame, 0)
return self.trace_dispatch_return(frame, t);
return self.trace_dispatch_return(frame, t)
def trace_dispatch_c_return_extend_back(self, frame, t):
# same for c return
@@ -119,7 +118,6 @@ class Profile(profile_orig.Profile):
self.trace_dispatch_c_call(frame, 0)
return self.trace_dispatch_return(frame, t)
# Add "return safety" to the dispatchers
dispatch = dict(profile_orig.Profile.dispatch)
dispatch.update({
@@ -144,7 +142,6 @@ class Profile(profile_orig.Profile):
self.simulate_call("profiler")
self.simulate_call("new_tasklet")
def ContextWrap(f):
@functools.wraps(f)
def ContextWrapper(self, arg, t):

View File

@@ -25,6 +25,7 @@ if greendns:
getnameinfo = greendns.getnameinfo
__patched__ = __patched__ + ['gethostbyname_ex', 'getnameinfo']
def create_connection(address,
timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
@@ -57,5 +58,3 @@ def create_connection(address,
sock.close()
raise error(msg)

View File

@@ -21,7 +21,9 @@ else:
__patched__ = ['SSLSocket', 'wrap_socket', 'sslwrap_simple']
class GreenSSLSocket(__ssl.SSLSocket):
""" This is a green version of the SSLSocket class from the ssl module added
in 2.6. For documentation on it, please see the Python standard
documentation.
@@ -37,6 +39,7 @@ class GreenSSLSocket(__ssl.SSLSocket):
"""
# we are inheriting from SSLSocket because its constructor calls
# do_handshake whose behavior we wish to override
def __init__(self, sock, *args, **kw):
if not isinstance(sock, GreenSocket):
sock = GreenSocket(sock)
@@ -176,7 +179,6 @@ class GreenSSLSocket(__ssl.SSLSocket):
return ''
raise
def recv_into(self, buffer, nbytes=None, flags=0):
if not self.act_non_blocking:
trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
@@ -235,7 +237,6 @@ class GreenSSLSocket(__ssl.SSLSocket):
if time.time() >= end:
raise timeout_exc('timed out')
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
@@ -274,7 +275,8 @@ class GreenSSLSocket(__ssl.SSLSocket):
trampoline(self, read=True, timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
new_ssl = type(self)(newsock,
new_ssl = type(self)(
newsock,
keyfile=self.keyfile,
certfile=self.certfile,
server_side=True,
@@ -290,6 +292,7 @@ class GreenSSLSocket(__ssl.SSLSocket):
SSLSocket = GreenSSLSocket
def wrap_socket(sock, *a, **kw):
return GreenSSLSocket(sock, *a, **kw)

View File

@@ -20,6 +20,7 @@ if getattr(subprocess_orig, 'TimeoutExpired', None) is None:
"""This exception is raised when the timeout expires while waiting for
a child process.
"""
def __init__(self, cmd, output=None):
self.cmd = cmd
self.output = output

View File

@@ -66,6 +66,7 @@ def interrupt_main():
if hasattr(__thread, 'stack_size'):
__original_stack_size__ = __thread.stack_size
def stack_size(size=None):
if size is None:
return __original_stack_size__()

View File

@@ -12,7 +12,8 @@ __orig_threading = patcher.original('threading')
__threadlocal = __orig_threading.local()
patcher.inject('threading',
patcher.inject(
'threading',
globals(),
('thread', thread),
('time', time))
@@ -21,9 +22,12 @@ del patcher
_count = 1
class _GreenThread(object):
"""Wrapper for GreenThread objects to provide Thread-like attributes
and methods"""
def __init__(self, g):
global _count
self._g = g
@@ -61,6 +65,7 @@ class _GreenThread(object):
__threading = None
def _fixup_thread(t):
# Some third-party packages (lockfile) will try to patch the
# threading.Thread class with a get_name attribute if it doesn't

View File

@@ -5,7 +5,8 @@ from eventlet.green import socket
from eventlet.green import time
from eventlet.green import urllib
patcher.inject('urllib2',
patcher.inject(
'urllib2',
globals(),
('httplib', httplib),
('socket', socket),

View File

@@ -33,6 +33,7 @@ class _QueueLock(object):
is called, the threads are awoken in the order they blocked,
one at a time. This lock can be acquired recursively by the same
thread."""
def __init__(self):
self._waiters = deque()
self._count = 0
@@ -118,6 +119,7 @@ class _BlockedThread(object):
return True
return False
class Context(__zmq__.Context):
"""Subclass of :class:`zmq.core.context.Context`
"""
@@ -133,6 +135,7 @@ class Context(__zmq__.Context):
raise ZMQError(ENOTSUP)
return Socket(self, socket_type)
def _wraps(source_fn):
"""A decorator that copies the __name__ and __doc__ from the given
function
@@ -191,6 +194,7 @@ _Socket_send_multipart = _Socket.send_multipart
_Socket_recv_multipart = _Socket.recv_multipart
_Socket_getsockopt = _Socket.getsockopt
class Socket(_Socket):
"""Green version of :class:`zmq.core.socket.Socket
@@ -206,6 +210,7 @@ class Socket(_Socket):
* send_multipart
* recv_multipart
"""
def __init__(self, context, socket_type):
super(Socket, self).__init__(context, socket_type)
@@ -292,7 +297,6 @@ class Socket(_Socket):
# receiver. (Could check EVENTS for POLLIN here)
self._eventlet_recv_event.wake()
@_wraps(_Socket.send_multipart)
def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
"""A send_multipart method that's safe to use when multiple

View File

@@ -118,6 +118,7 @@ class GreenSocket(object):
Pass False to indicate that socket is already in non-blocking mode
to save syscalls.
"""
def __init__(self, family_or_realsock=socket.AF_INET, *args, **kwargs):
should_set_nonblocking = kwargs.pop('set_nonblocking', True)
if isinstance(family_or_realsock, six.integer_types):
@@ -534,6 +535,7 @@ class GreenPipe(_fileobject):
- Universal new lines are not supported and newlines property not implemented
- file argument can be descriptor, file name or file object.
"""
def __init__(self, f, mode='r', bufsize=-1):
if not isinstance(f, six.string_types + (int, file)):
raise TypeError('f(ile) should be int, str, unicode or file, not %r' % f)

View File

@@ -15,6 +15,7 @@ DEBUG = True
class GreenPool(object):
"""The GreenPool class is a pool of green threads.
"""
def __init__(self, size=1000):
self.size = size
self.coroutines_running = set()
@@ -187,6 +188,7 @@ class GreenPile(object):
than the one which is calling spawn. The iterator will exit early in that
situation.
"""
def __init__(self, size_or_pool=1000):
if isinstance(size_or_pool, GreenPool):
self.pool = size_or_pool

View File

@@ -12,6 +12,7 @@ __all__ = ['getcurrent', 'sleep', 'spawn', 'spawn_n', 'spawn_after', 'spawn_afte
getcurrent = greenlet.getcurrent
def sleep(seconds=0):
"""Yield control to another eligible coroutine until at least *seconds* have
elapsed.
@@ -109,7 +110,8 @@ def spawn_after_local(seconds, func, *args, **kwargs):
def call_after_global(seconds, func, *args, **kwargs):
warnings.warn("call_after_global is renamed to spawn_after, which"
warnings.warn(
"call_after_global is renamed to spawn_after, which"
"has the same signature and semantics (plus a bit extra). Please do a"
" quick search-and-replace on your codebase, thanks!",
DeprecationWarning, stacklevel=2)
@@ -117,7 +119,8 @@ def call_after_global(seconds, func, *args, **kwargs):
def call_after_local(seconds, function, *args, **kwargs):
warnings.warn("call_after_local is renamed to spawn_after_local, which"
warnings.warn(
"call_after_local is renamed to spawn_after_local, which"
"has the same signature and semantics (plus a bit extra).",
DeprecationWarning, stacklevel=2)
hub = hubs.get_hub()
@@ -142,6 +145,7 @@ def exc_after(seconds, *throw_args):
TimeoutError = timeout.Timeout
with_timeout = timeout.with_timeout
def _spawn_n(seconds, func, args, kwargs):
hub = hubs.get_hub()
g = greenlet.greenlet(func, parent=hub.greenlet)
@@ -154,6 +158,7 @@ class GreenThread(greenlet.greenlet):
property of being able to retrieve the return value of the main function.
Do not construct GreenThread objects directly; call :func:`spawn` to get one.
"""
def __init__(self, parent):
greenlet.greenlet.__init__(self, self.main, parent)
self._exit_event = event.Event()
@@ -239,6 +244,7 @@ class GreenThread(greenlet.greenlet):
to :class:`greenlet.GreenletExit`)."""
return cancel(self, *throw_args)
def cancel(g, *throw_args):
"""Like :func:`kill`, but only terminates the greenthread if it hasn't
already started execution. If the greenthread has already started
@@ -246,6 +252,7 @@ def cancel(g, *throw_args):
if not g:
kill(g, *throw_args)
def kill(g, *throw_args):
"""Terminates the target greenthread by raising an exception into it.
Whatever that greenthread might be doing; be it waiting for I/O or another

View File

@@ -32,6 +32,7 @@ from eventlet.hubs.poll import READ, WRITE
# NOTE: we rely on the fact that the epoll flag constants
# are identical in value to the poll constants
class Hub(poll.Hub):
def __init__(self, clock=time.time):
BaseHub.__init__(self, clock)

View File

@@ -1,10 +1,9 @@
import errno
import heapq
import math
import traceback
import signal
import sys
import warnings
import traceback
arm_alarm = None
if hasattr(signal, 'setitimer'):
@@ -20,9 +19,9 @@ else:
signal.alarm(math.ceil(seconds))
arm_alarm = alarm_signal
from eventlet.support import greenlets as greenlet, clear_sys_exc_info
from eventlet.hubs import timer, IOClosed
from eventlet import patcher
from eventlet.hubs import timer, IOClosed
from eventlet.support import greenlets as greenlet, clear_sys_exc_info
time = patcher.original('time')
g_prevent_multiple_readers = True
@@ -39,6 +38,7 @@ def closed_callback(fileno):
class FdListener(object):
def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
""" The following are required:
cb - the standard callback, which will switch into the
@@ -61,6 +61,7 @@ class FdListener(object):
self.mark_as_closed = mark_as_closed
self.spent = False
self.greenlet = greenlet.getcurrent()
def __repr__(self):
return "%s(%r, %r, %r, %r)" % (type(self).__name__, self.evtype, self.fileno,
self.cb, self.tb)
@@ -77,10 +78,15 @@ noop = FdListener(READ, 0, lambda x: None, lambda x: None, None)
# in debug mode, track the call site that created the listener
class DebugListener(FdListener):
def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
self.where_called = traceback.format_stack()
self.greenlet = greenlet.getcurrent()
super(DebugListener, self).__init__(evtype, fileno, cb, tb, mark_as_closed)
def __repr__(self):
return "DebugListener(%r, %r, %r, %r, %r, %r)\n%sEndDebugFdListener" % (
self.evtype,
@@ -159,12 +165,13 @@ class BaseHub(object):
bucket = self.listeners[evtype]
if fileno in bucket:
if g_prevent_multiple_readers:
raise RuntimeError("Second simultaneous %s on fileno %s "\
"detected. Unless you really know what you're doing, "\
"make sure that only one greenthread can %s any "\
"particular socket. Consider using a pools.Pool. "\
"If you do know what you're doing and want to disable "\
"this error, call "\
raise RuntimeError(
"Second simultaneous %s on fileno %s "
"detected. Unless you really know what you're doing, "
"make sure that only one greenthread can %s any "
"particular socket. Consider using a pools.Pool. "
"If you do know what you're doing and want to disable "
"this error, call "
"eventlet.debug.hub_prevent_multiple_readers(False) - MY THREAD=%s; THAT THREAD=%s" % (
evtype, fileno, evtype, cb, bucket[fileno]))
# store off the second listener in another structure
@@ -232,7 +239,6 @@ class BaseHub(object):
"""
self._obsolete(fileno)
def remove_descriptor(self, fileno):
""" Completely remove all listeners for this fileno. For internal use
only."""
@@ -244,7 +250,7 @@ class BaseHub(object):
for listener in listeners:
try:
listener.cb(fileno)
except Exception as e:
except Exception:
self.squelch_generic_exception(sys.exc_info())
def close_one(self):

View File

@@ -6,7 +6,7 @@ select = patcher.original('select')
time = patcher.original('time')
sleep = time.sleep
from eventlet.support import get_errno, clear_sys_exc_info
from eventlet.support import clear_sys_exc_info
from eventlet.hubs.hub import BaseHub, READ, WRITE, noop
@@ -53,8 +53,7 @@ class Hub(BaseHub):
events = self._events.setdefault(fileno, {})
if evtype not in events:
try:
event = select.kevent(fileno,
FILTERS.get(evtype), select.KQ_EV_ADD)
event = select.kevent(fileno, FILTERS.get(evtype), select.KQ_EV_ADD)
self._control([event], 0, 0)
events[evtype] = event
except ValueError:
@@ -63,8 +62,10 @@ class Hub(BaseHub):
return listener
def _delete_events(self, events):
del_events = [select.kevent(e.ident, e.filter, select.KQ_EV_DELETE)
for e in events]
del_events = [
select.kevent(e.ident, e.filter, select.KQ_EV_DELETE)
for e in events
]
self._control(del_events, 0, 0)
def remove(self, listener):
@@ -75,7 +76,7 @@ class Hub(BaseHub):
event = self._events[fileno].pop(evtype)
try:
self._delete_events([event])
except OSError as e:
except OSError:
pass
def remove_descriptor(self, fileno):
@@ -83,9 +84,9 @@ class Hub(BaseHub):
try:
events = self._events.pop(fileno).values()
self._delete_events(events)
except KeyError as e:
except KeyError:
pass
except OSError as e:
except OSError:
pass
def wait(self, seconds=None):

View File

@@ -1,13 +1,13 @@
import sys
import errno
import signal
import sys
from eventlet import patcher
select = patcher.original('select')
time = patcher.original('time')
sleep = time.sleep
from eventlet.hubs.hub import BaseHub, READ, WRITE, noop
from eventlet.support import get_errno, clear_sys_exc_info
from eventlet.hubs.hub import BaseHub, READ, WRITE, noop, alarm_handler
EXC_MASK = select.POLLERR | select.POLLHUP
READ_MASK = select.POLLIN | select.POLLPRI

View File

@@ -4,8 +4,7 @@ import event
import types
from eventlet.support import greenlets as greenlet, six
from eventlet.hubs.hub import BaseHub, FdListener, READ, WRITE
from eventlet.support import six
from eventlet.hubs.hub import BaseHub, READ, WRITE
class event_wrapper(object):
@@ -35,6 +34,7 @@ class event_wrapper(object):
def pending(self):
return bool(self.impl and self.impl.pending())
class Hub(BaseHub):
SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit)
@@ -167,6 +167,7 @@ def _scheduled_call(event_impl, handle, evtype, arg):
finally:
event_impl.delete()
def _scheduled_call_local(event_impl, handle, evtype, arg):
cb, args, kwargs, caller_greenlet = arg
try:

View File

@@ -1,7 +1,7 @@
import sys
import errno
import sys
from eventlet import patcher
from eventlet.support import get_errno, clear_sys_exc_info, six
from eventlet.support import get_errno, clear_sys_exc_info
select = patcher.original('select')
time = patcher.original('time')

View File

@@ -4,6 +4,7 @@ from twisted.internet.base import DelayedCall as TwistedDelayedCall
from eventlet.support import greenlets as greenlet
from eventlet.hubs.hub import FdListener, READ, WRITE
class DelayedCall(TwistedDelayedCall):
"fix DelayedCall to behave like eventlet's Timer in some respects"
@@ -13,6 +14,7 @@ class DelayedCall(TwistedDelayedCall):
return
return TwistedDelayedCall.cancel(self)
class LocalDelayedCall(DelayedCall):
def __init__(self, *args, **kwargs):
@@ -29,6 +31,7 @@ class LocalDelayedCall(DelayedCall):
cancelled = property(_get_cancelled, _set_cancelled)
def callLater(DelayedCallClass, reactor, _seconds, _f, *args, **kw):
# the same as original but creates fixed DelayedCall instance
assert callable(_f), "%s is not callable" % _f
@@ -43,6 +46,7 @@ def callLater(DelayedCallClass, reactor, _seconds, _f, *args, **kw):
reactor._newTimedCalls.append(tple)
return tple
class socket_rwdescriptor(FdListener):
# implements(IReadWriteDescriptor)
def __init__(self, evtype, fileno, cb):
@@ -50,6 +54,7 @@ class socket_rwdescriptor(FdListener):
if not isinstance(fileno, (int, long)):
raise TypeError("Expected int or long, got %s" % type(fileno))
# Twisted expects fileno to be a callable, not an attribute
def _fileno():
return fileno
self.fileno = _fileno
@@ -131,6 +136,7 @@ class BaseTwistedHub(object):
def schedule_call_local(self, seconds, func, *args, **kwargs):
from twisted.internet import reactor
def call_if_greenlet_alive(*args1, **kwargs1):
if timer.greenlet.dead:
return
@@ -166,7 +172,6 @@ class BaseTwistedHub(object):
from twisted.internet import reactor
return reactor.getWriters()
def get_timers_count(self):
from twisted.internet import reactor
return len(reactor.getDelayedCalls())
@@ -253,10 +258,12 @@ class TwistedHub(BaseTwistedHub):
Hub = TwistedHub
class DaemonicThread(threading.Thread):
def _set_daemon(self):
return True
def make_twisted_threadpool_daemonic():
from twisted.python.threadpool import ThreadPool
if ThreadPool.threadFactory != DaemonicThread:

View File

@@ -1,5 +1,5 @@
import sys
import imp
import sys
from eventlet.support import six
@@ -13,6 +13,7 @@ class SysModulesSaver(object):
"""Class that captures some subset of the current state of
sys.modules. Pass in an iterator of module names to the
constructor."""
def __init__(self, module_names=()):
self._saved = {}
imp.acquire_lock()
@@ -81,22 +82,22 @@ def inject(module_name, new_globals, *additional_modules):
for name, mod in additional_modules:
sys.modules[name] = mod
## Remove the old module from sys.modules and reimport it while
## the specified modules are in place
# Remove the old module from sys.modules and reimport it while
# the specified modules are in place
sys.modules.pop(module_name, None)
try:
module = __import__(module_name, {}, {}, module_name.split('.')[:-1])
if new_globals is not None:
## Update the given globals dictionary with everything from this new module
# Update the given globals dictionary with everything from this new module
for name in dir(module):
if name not in __exclude:
new_globals[name] = getattr(module, name)
## Keep a reference to the new module to prevent it from dying
# Keep a reference to the new module to prevent it from dying
sys.modules[patched_name] = module
finally:
saver.restore() ## Put the original modules back
saver.restore() # Put the original modules back
return module
@@ -140,6 +141,7 @@ def patch_function(func, *additional_modules):
saver.restore()
return patched
def _original_patch_function(func, *module_names):
"""Kind of the contrapositive of patch_function: decorates a
function such that when it's called, sys.modules is populated only
@@ -201,6 +203,8 @@ def original(modname):
return sys.modules[original_name]
already_patched = {}
def monkey_patch(**on):
"""Globally patches certain system modules to be greenthread-friendly.
@@ -219,7 +223,7 @@ def monkey_patch(**on):
default_on = on.pop("all", None)
for k in six.iterkeys(on):
if k not in accepted_args:
raise TypeError("monkey_patch() got an unexpected "\
raise TypeError("monkey_patch() got an unexpected "
"keyword argument %r" % k)
if default_on is None:
default_on = not (True in on.values())
@@ -279,6 +283,7 @@ def monkey_patch(**on):
finally:
imp.release_lock()
def is_monkey_patched(module):
"""Returns True if the given module is monkeypatched currently, False if
not. *module* can be either the module itself or its name.
@@ -290,14 +295,17 @@ def is_monkey_patched(module):
return module in already_patched or \
getattr(module, '__name__', None) in already_patched
def _green_os_modules():
from eventlet.green import os
return [('os', os)]
def _green_select_modules():
from eventlet.green import select
return [('select', select)]
def _green_socket_modules():
from eventlet.green import socket
try:
@@ -306,6 +314,7 @@ def _green_socket_modules():
except ImportError:
return [('socket', socket)]
def _green_thread_modules():
from eventlet.green import Queue
from eventlet.green import thread
@@ -315,10 +324,12 @@ def _green_thread_modules():
if six.PY3:
return [('queue', Queue), ('_thread', thread), ('threading', threading)]
def _green_time_modules():
from eventlet.green import time
return [('time', time)]
def _green_MySQLdb():
try:
from eventlet.green import MySQLdb
@@ -326,6 +337,7 @@ def _green_MySQLdb():
except ImportError:
return []
def _green_builtins():
try:
from eventlet.green import builtin
@@ -344,16 +356,14 @@ def slurp_properties(source, destination, ignore=[], srckeys=None):
"""
if srckeys is None:
srckeys = source.__all__
destination.update(dict([(name, getattr(source, name))
destination.update(dict([
(name, getattr(source, name))
for name in srckeys
if not (
name.startswith('__') or
name in ignore)
if not (name.startswith('__') or name in ignore)
]))
if __name__ == "__main__":
import sys
sys.argv.pop(0)
monkey_patch()
with open(sys.argv[0]) as f:

View File

@@ -54,6 +54,7 @@ class Pool(object):
greenthread calling :meth:`get` to cooperatively yield until an item
is :meth:`put` in.
"""
def __init__(self, min_size=0, max_size=4, order_as_stack=False, create=None):
"""*order_as_stack* governs the ordering of the items in the free pool.
If ``False`` (the default), the free items collection (of items that
@@ -176,6 +177,6 @@ class TokenPool(Pool):
that the coroutine which holds the token has a right to consume some
limited resource.
"""
def create(self):
return Token()

View File

@@ -737,5 +737,3 @@ class Pool(object):
g = self.linkable_class()
g.link(lambda *_args: self.semaphore.release())
return g

View File

@@ -116,7 +116,7 @@ class Process(object):
written = self.child_stdin.write(stuff)
self.child_stdin.flush()
except ValueError as e:
## File was closed
# File was closed
assert str(e) == 'I/O operation on closed file'
if written == 0:
self.dead_callback()

View File

@@ -378,6 +378,7 @@ class Queue(LightQueue):
In all other respects, this Queue class resembles the standard library
:class:`Queue`.
'''
def __init__(self, maxsize=None):
LightQueue.__init__(self, maxsize)
self.unfinished_tasks = 0

View File

@@ -14,7 +14,8 @@ def get_errno(exc):
"""
try:
if exc.errno is not None: return exc.errno
if exc.errno is not None:
return exc.errno
except AttributeError:
pass
try:

View File

@@ -34,6 +34,8 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
import sys
from eventlet import patcher
from eventlet.green import _socket_nodns
@@ -46,7 +48,8 @@ dns = patcher.import_patched('dns',
select=select)
for pkg in ('dns.query', 'dns.exception', 'dns.inet', 'dns.message',
'dns.rdatatype', 'dns.resolver', 'dns.reversename'):
setattr(dns, pkg.split('.')[1], patcher.import_patched(pkg,
setattr(dns, pkg.split('.')[1], patcher.import_patched(
pkg,
socket=_socket_nodns,
time=time,
select=select))
@@ -55,14 +58,18 @@ socket = _socket_nodns
DNS_QUERY_TIMEOUT = 10.0
#
# Resolver instance used to perform DNS lookups.
#
class FakeAnswer(list):
expiration = 0
class FakeRecord(object):
pass
class ResolverProxy(object):
def __init__(self, *args, **kwargs):
self._resolver = None
@@ -113,6 +120,7 @@ class ResolverProxy(object):
#
resolver = ResolverProxy(dev=True)
def resolve(name):
error = None
rrset = None
@@ -120,9 +128,9 @@ def resolve(name):
if rrset is None or time.time() > rrset.expiration:
try:
rrset = resolver.query(name)
except dns.exception.Timeout as e:
except dns.exception.Timeout:
error = (socket.EAI_AGAIN, 'Lookup timed out')
except dns.exception.DNSException as e:
except dns.exception.DNSException:
error = (socket.EAI_NODATA, 'No address associated with hostname')
else:
pass
@@ -134,6 +142,8 @@ def resolve(name):
else:
sys.stderr.write('DNS error: %r %r\n' % (name, error))
return rrset
#
# methods
#
@@ -147,9 +157,9 @@ def getaliases(host):
try:
answers = dns.resolver.query(host, 'cname')
except dns.exception.Timeout as e:
except dns.exception.Timeout:
error = (socket.EAI_AGAIN, 'Lookup timed out')
except dns.exception.DNSException as e:
except dns.exception.DNSException:
error = (socket.EAI_NODATA, 'No address associated with hostname')
else:
for record in answers:
@@ -160,6 +170,7 @@ def getaliases(host):
return cnames
def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0):
"""Replacement for Python's socket.getaddrinfo.
@@ -178,6 +189,7 @@ def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0):
value.append((socket.AF_INET, socktype, proto, '', (rr.address, port)))
return value
def gethostbyname(hostname):
"""Replacement for Python's socket.gethostbyname.
@@ -189,6 +201,7 @@ def gethostbyname(hostname):
rrset = resolve(hostname)
return rrset[0].address
def gethostbyname_ex(hostname):
"""Replacement for Python's socket.gethostbyname_ex.
@@ -204,6 +217,7 @@ def gethostbyname_ex(hostname):
addrs.append(rr.address)
return (hostname, [], addrs)
def getnameinfo(sockaddr, flags):
"""Replacement for Python's socket.getnameinfo.
@@ -232,10 +246,10 @@ def getnameinfo(sockaddr, flags):
if len(rrset) > 1:
raise socket.error('sockaddr resolved to multiple addresses')
host = rrset[0].target.to_text(omit_final_dot=True)
except dns.exception.Timeout as e:
except dns.exception.Timeout:
if flags & socket.NI_NAMEREQD:
raise socket.gaierror((socket.EAI_AGAIN, 'Lookup timed out'))
except dns.exception.DNSException as e:
except dns.exception.DNSException:
if flags & socket.NI_NAMEREQD:
raise socket.gaierror(
(socket.EAI_NONAME, 'Name or service not known'))
@@ -246,9 +260,9 @@ def getnameinfo(sockaddr, flags):
raise socket.error('sockaddr resolved to multiple addresses')
if flags & socket.NI_NUMERICHOST:
host = rrset[0].address
except dns.exception.Timeout as e:
except dns.exception.Timeout:
raise socket.gaierror((socket.EAI_AGAIN, 'Lookup timed out'))
except dns.exception.DNSException as e:
except dns.exception.DNSException:
raise socket.gaierror(
(socket.EAI_NODATA, 'No address associated with hostname'))
@@ -272,6 +286,7 @@ def is_ipv4_addr(host):
return True
return False
def _net_read(sock, count, expiration):
"""coro friendly replacement for dns.query._net_write
Read the specified number of bytes from sock. Keep trying until we
@@ -284,7 +299,7 @@ def _net_read(sock, count, expiration):
try:
n = sock.recv(count)
except socket.timeout:
## Q: Do we also need to catch coro.CoroutineSocketWake and pass?
# Q: Do we also need to catch coro.CoroutineSocketWake and pass?
if expiration - time.time() <= 0.0:
raise dns.exception.Timeout
if n == '':
@@ -293,6 +308,7 @@ def _net_read(sock, count, expiration):
s = s + n
return s
def _net_write(sock, data, expiration):
"""coro friendly replacement for dns.query._net_write
Write the specified data to the socket.
@@ -305,12 +321,12 @@ def _net_write(sock, data, expiration):
try:
current += sock.send(data[current:])
except socket.timeout:
## Q: Do we also need to catch coro.CoroutineSocketWake and pass?
# Q: Do we also need to catch coro.CoroutineSocketWake and pass?
if expiration - time.time() <= 0.0:
raise dns.exception.Timeout
def udp(
q, where, timeout=DNS_QUERY_TIMEOUT, port=53, af=None, source=None,
def udp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53, af=None, source=None,
source_port=0, ignore_unexpected=False):
"""coro friendly replacement for dns.query.udp
Return the response obtained after sending a query via UDP.
@@ -362,14 +378,14 @@ def udp(
try:
s.sendto(wire, destination)
except socket.timeout:
## Q: Do we also need to catch coro.CoroutineSocketWake and pass?
# Q: Do we also need to catch coro.CoroutineSocketWake and pass?
if expiration - time.time() <= 0.0:
raise dns.exception.Timeout
while 1:
try:
(wire, from_address) = s.recvfrom(65535)
except socket.timeout:
## Q: Do we also need to catch coro.CoroutineSocketWake and pass?
# Q: Do we also need to catch coro.CoroutineSocketWake and pass?
if expiration - time.time() <= 0.0:
raise dns.exception.Timeout
if from_address == destination:
@@ -386,6 +402,7 @@ def udp(
raise dns.query.BadResponse()
return r
def tcp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
af=None, source=None, source_port=0):
"""coro friendly replacement for dns.query.tcp
@@ -434,7 +451,7 @@ def tcp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
try:
s.connect(destination)
except socket.timeout:
## Q: Do we also need to catch coro.CoroutineSocketWake and pass?
# Q: Do we also need to catch coro.CoroutineSocketWake and pass?
if expiration - time.time() <= 0.0:
raise dns.exception.Timeout
@@ -454,10 +471,10 @@ def tcp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
raise dns.query.BadResponse()
return r
def reset():
resolver.clear()
# Install our coro-friendly replacements for the tcp and udp query methods.
dns.query.tcp = tcp
dns.query.udp = udp

View File

@@ -29,6 +29,7 @@ from psycopg2 import extensions
from eventlet.hubs import trampoline
def make_psycopg_green():
"""Configure Psycopg to be used with eventlet in non-blocking way."""
if not hasattr(extensions, 'set_wait_callback'):
@@ -38,6 +39,7 @@ def make_psycopg_green():
extensions.set_wait_callback(eventlet_wait_callback)
def eventlet_wait_callback(conn, timeout=-1):
"""A wait callback useful to allow eventlet to work with Psycopg."""
while 1:

View File

@@ -3,10 +3,10 @@ from py.magic import greenlet
import sys
import types
def emulate():
module = types.ModuleType('greenlet')
sys.modules['greenlet'] = module
module.greenlet = greenlet
module.getcurrent = greenlet.getcurrent
module.GreenletExit = greenlet.GreenletExit

View File

@@ -110,7 +110,7 @@ class MovedModule(_LazyDescr):
# well if this MovedModule is for a module that is unavailable on this
# machine (like winreg on Unix systems). Thus, we pretend __file__ and
# __name__ don't exist if the module hasn't been loaded yet. See issues
# #51 and #53.
# 51 and #53.
if attr in ("__file__", "__name__") and self.mod not in sys.modules:
raise AttributeError
_module = self._resolve()
@@ -159,7 +159,6 @@ class MovedAttribute(_LazyDescr):
return getattr(module, self.attr)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
@@ -477,14 +476,17 @@ def iterkeys(d, **kw):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)(**kw))
def itervalues(d, **kw):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)(**kw))
def iteritems(d, **kw):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)(**kw))
def iterlists(d, **kw):
"""Return an iterator over the (key, [values]) pairs of a dictionary."""
return iter(getattr(d, _iterlists)(**kw))
@@ -493,6 +495,7 @@ def iterlists(d, **kw):
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
@@ -512,14 +515,18 @@ else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
def iterbytes(buf):
return (ord(byte) for byte in buf)
import StringIO
@@ -531,7 +538,6 @@ _add_doc(u, """Text literal""")
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
@@ -550,7 +556,6 @@ else:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
@@ -563,6 +568,7 @@ if print_ is None:
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
@@ -618,6 +624,7 @@ def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
return meta("NewBase", bases, {})
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):

View File

@@ -3,10 +3,10 @@ from stackless import greenlet
import sys
import types
def emulate():
module = types.ModuleType('greenlet')
sys.modules['greenlet'] = module
module.greenlet = greenlet
module.getcurrent = greenlet.getcurrent
module.GreenletExit = greenlet.GreenletExit

View File

@@ -30,6 +30,8 @@ _NONE = object()
# deriving from BaseException so that "except Exception as e" doesn't catch
# Timeout exceptions.
class Timeout(BaseException):
"""Raises *exception* in the current greenthread after *timeout* seconds.

View File

@@ -166,6 +166,7 @@ class Proxy(object):
of strings, which represent the names of attributes that should be
wrapped in Proxy objects when accessed.
"""
def __init__(self, obj, autowrap=(), autowrap_names=()):
self._obj = obj
self._autowrap = autowrap

View File

@@ -1,11 +1,13 @@
import socket
import sys
import warnings
__original_socket__ = socket.socket
def tcp_socket():
warnings.warn("eventlet.util.tcp_socket is deprecated. "
warnings.warn(
"eventlet.util.tcp_socket is deprecated. "
"Please use the standard socket technique for this instead: "
"sock = socket.socket()",
DeprecationWarning, stacklevel=2)
@@ -15,8 +17,11 @@ def tcp_socket():
# if ssl is available, use eventlet.green.ssl for our ssl implementation
from eventlet.green import ssl
def wrap_ssl(sock, certificate=None, private_key=None, server_side=False):
warnings.warn("eventlet.util.wrap_ssl is deprecated. "
warnings.warn(
"eventlet.util.wrap_ssl is deprecated. "
"Please use the eventlet.green.ssl.wrap_socket()",
DeprecationWarning, stacklevel=2)
return ssl.wrap_socket(
@@ -28,7 +33,8 @@ def wrap_ssl(sock, certificate=None, private_key=None, server_side=False):
def wrap_socket_with_coroutine_socket(use_thread_pool=None):
warnings.warn("eventlet.util.wrap_socket_with_coroutine_socket() is now "
warnings.warn(
"eventlet.util.wrap_socket_with_coroutine_socket() is now "
"eventlet.patcher.monkey_patch(all=False, socket=True)",
DeprecationWarning, stacklevel=2)
from eventlet import patcher
@@ -36,7 +42,8 @@ def wrap_socket_with_coroutine_socket(use_thread_pool=None):
def wrap_pipes_with_coroutine_pipes():
warnings.warn("eventlet.util.wrap_pipes_with_coroutine_pipes() is now "
warnings.warn(
"eventlet.util.wrap_pipes_with_coroutine_pipes() is now "
"eventlet.patcher.monkey_patch(all=False, os=True)",
DeprecationWarning, stacklevel=2)
from eventlet import patcher
@@ -44,7 +51,8 @@ def wrap_pipes_with_coroutine_pipes():
def wrap_select_with_coroutine_select():
warnings.warn("eventlet.util.wrap_select_with_coroutine_select() is now "
warnings.warn(
"eventlet.util.wrap_select_with_coroutine_select() is now "
"eventlet.patcher.monkey_patch(all=False, select=True)",
DeprecationWarning, stacklevel=2)
from eventlet import patcher
@@ -57,7 +65,8 @@ def wrap_threading_local_with_coro_local():
Since greenlets cannot cross threads, this should be semantically
identical to ``threadlocal.local``
"""
warnings.warn("eventlet.util.wrap_threading_local_with_coro_local() is now "
warnings.warn(
"eventlet.util.wrap_threading_local_with_coro_local() is now "
"eventlet.patcher.monkey_patch(all=False, thread=True) -- though"
"note that more than just _local is patched now.",
DeprecationWarning, stacklevel=2)
@@ -67,7 +76,8 @@ def wrap_threading_local_with_coro_local():
def socket_bind_and_listen(descriptor, addr=('', 0), backlog=50):
warnings.warn("eventlet.util.socket_bind_and_listen is deprecated."
warnings.warn(
"eventlet.util.socket_bind_and_listen is deprecated."
"Please use the standard socket methodology for this instead:"
"sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)"
"sock.bind(addr)"
@@ -80,7 +90,8 @@ def socket_bind_and_listen(descriptor, addr=('', 0), backlog=50):
def set_reuse_addr(descriptor):
warnings.warn("eventlet.util.set_reuse_addr is deprecated."
warnings.warn(
"eventlet.util.set_reuse_addr is deprecated."
"Please use the standard socket methodology for this instead:"
"sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)",
DeprecationWarning, stacklevel=2)

View File

@@ -3,11 +3,11 @@ import codecs
import collections
import errno
from random import Random
from socket import error as SocketError
import string
import struct
import sys
import time
from socket import error as SocketError
try:
from hashlib import md5, sha1
@@ -15,7 +15,6 @@ except ImportError: # pragma NO COVER
from md5 import md5
from sha import sha as sha1
import eventlet
from eventlet import semaphore
from eventlet import wsgi
from eventlet.green import socket

View File

@@ -4,6 +4,7 @@ from eventlet.green import socket
PORT = 3001
participants = set()
def read_chat_forever(writer, reader):
line = reader.readline()
while line:

View File

@@ -13,7 +13,8 @@ $ python examples/distributed_websocket_chat.py -p tcp://127.0.0.1:12345 -s tcp:
So all messages are published to port 12345 and the device forwards all the
messages to 12346 where they are subscribed to
"""
import os, sys
import os
import sys
import eventlet
from collections import defaultdict
from eventlet import spawn_n, sleep
@@ -26,6 +27,7 @@ from uuid import uuid1
use_hub('zeromq')
ctx = zmq.Context()
class IDName(object):
def __init__(self):
@@ -50,6 +52,7 @@ class IDName(object):
participants = defaultdict(IDName)
def subscribe_and_distribute(sub_socket):
global participants
while True:
@@ -62,6 +65,7 @@ def subscribe_and_distribute(sub_socket):
except:
del participants[ws]
@websocket.WebSocketWSGI
def handle(ws):
global pub_socket
@@ -82,6 +86,7 @@ def handle(ws):
finally:
del participants[ws]
def dispatch(environ, start_response):
"""Resolves to the web page or the websocket depending on the path."""
global port

View File

@@ -13,12 +13,14 @@ from __future__ import print_function
import eventlet
def handle(fd):
print("client connected")
while True:
# pass through every non-eof line
x = fd.readline()
if not x: break
if not x:
break
fd.write(x)
fd.flush()
print("echoed", x, end=' ')

View File

@@ -7,10 +7,12 @@ feedparser = eventlet.import_patched('feedparser')
# the pool provides a safety limit on our concurrency
pool = eventlet.GreenPool()
def fetch_title(url):
d = feedparser.parse(url)
return d.feed.get('title', '')
def app(environ, start_response):
if environ['REQUEST_METHOD'] != 'POST':
start_response('403 Forbidden', [])

View File

@@ -5,9 +5,12 @@ starting from a simple framework like this.
"""
import eventlet
def closed_callback():
print("called back")
def forward(source, dest, cb=lambda: None):
"""Forwards bytes unidirectionally from source to dest"""
while True:

View File

@@ -35,6 +35,7 @@ def fetch(url, seen, pool):
# spawned greenthreads start their own stacks
pool.spawn_n(fetch, new_url, seen, pool)
def crawl(start_url):
"""Recursively crawl starting from *start_url*. Returns a set of
urls that were found."""

View File

@@ -24,6 +24,7 @@ def handle(ws):
ws.send("0 %s %s\n" % (i, random.random()))
eventlet.sleep(0.1)
def dispatch(environ, start_response):
""" This resolves to the web page or the websocket depending on
the path."""

View File

@@ -8,6 +8,7 @@ PORT = 7000
participants = set()
@websocket.WebSocketWSGI
def handle(ws):
participants.add(ws)
@@ -21,6 +22,7 @@ def handle(ws):
finally:
participants.remove(ws)
def dispatch(environ, start_response):
"""Resolves to the web page or the websocket depending on the path."""
if environ['PATH_INFO'] == '/chat':

View File

@@ -8,6 +8,7 @@ http://pypi.python.org/pypi/Spawning/
import eventlet
from eventlet import wsgi
def hello_world(env, start_response):
if env['PATH_INFO'] != '/':
start_response('404 Not Found', [('Content-Type', 'text/plain')])

View File

@@ -1,4 +1,5 @@
import eventlet, sys
import eventlet
import sys
from eventlet.green import socket, zmq
from eventlet.hubs import use_hub
use_hub('zeromq')
@@ -7,6 +8,7 @@ ADDR = 'ipc:///tmp/chat'
ctx = zmq.Context()
def publish(writer):
print("connected")
@@ -25,6 +27,7 @@ def publish(writer):
PORT = 3001
def read_chat_forever(reader, pub_socket):
line = reader.readline()

View File

@@ -3,6 +3,7 @@ import eventlet
CTX = zmq.Context(1)
def bob_client(ctx, count):
print("STARTING BOB")
bob = zmq.Socket(CTX, zmq.REQ)
@@ -13,6 +14,7 @@ def bob_client(ctx, count):
bob.send("HI")
print("BOB GOT:", bob.recv())
def alice_server(ctx, count):
print("STARTING ALICE")
alice = zmq.Socket(CTX, zmq.REP)

View File

@@ -2,10 +2,12 @@ import eventlet
from eventlet import event
from tests import LimitedTestCase
class TestEvent(LimitedTestCase):
def test_waiting_for_event(self):
evt = event.Event()
value = 'some stuff'
def send_to_event():
evt.send(value)
eventlet.spawn_n(send_to_event)
@@ -19,8 +21,8 @@ class TestEvent(LimitedTestCase):
def _test_multiple_waiters(self, exception):
evt = event.Event()
value = 'some stuff'
results = []
def wait_on_event(i_am_done):
evt.wait()
results.append(True)
@@ -48,6 +50,7 @@ class TestEvent(LimitedTestCase):
self.assertRaises(AssertionError, evt.reset)
value = 'some stuff'
def send_to_event():
evt.send(value)
eventlet.spawn_n(send_to_event)
@@ -61,6 +64,7 @@ class TestEvent(LimitedTestCase):
# reset and everything should be happy
evt.reset()
def send_to_event2():
evt.send(value2)
eventlet.spawn_n(send_to_event2)
@@ -75,4 +79,3 @@ class TestEvent(LimitedTestCase):
# shouldn't see the RuntimeError again
eventlet.Timeout(0.001)
self.assertRaises(eventlet.Timeout, evt.wait)
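The pattern these Event tests exercise, reduced to a standalone sketch (the value is a placeholder):

    import eventlet
    from eventlet import event

    evt = event.Event()

    def sender():
        evt.send('some stuff')   # wakes every greenthread blocked in evt.wait()

    eventlet.spawn_n(sender)
    print(evt.wait())   # blocks until sender() runs, then prints 'some stuff'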

View File

@@ -3,10 +3,13 @@ from eventlet import greenthread
from eventlet.support import greenlets as greenlet
_g_results = []
def passthru(*args, **kw):
_g_results.append((args, kw))
return args, kw
def waiter(a):
greenthread.sleep(0.1)
return a
@@ -19,6 +22,7 @@ class Asserts(object):
assert gt.dead
assert not gt
class Spawn(LimitedTestCase, Asserts):
def tearDown(self):
global _g_results
@@ -66,6 +70,7 @@ class Spawn(LimitedTestCase, Asserts):
def test_link(self):
results = []
def link_func(g, *a, **kw):
results.append(g)
results.append(a)
@@ -77,6 +82,7 @@ class Spawn(LimitedTestCase, Asserts):
def test_link_after_exited(self):
results = []
def link_func(g, *a, **kw):
results.append(g)
results.append(a)
@@ -101,6 +107,7 @@ class Spawn(LimitedTestCase, Asserts):
gt.wait()
self.assertEqual(called, [True])
class SpawnAfter(Spawn):
def test_basic(self):
gt = greenthread.spawn_after(0.1, passthru, 20)
@@ -123,6 +130,7 @@ class SpawnAfter(Spawn):
gt.kill()
self.assert_dead(gt)
class SpawnAfterLocal(LimitedTestCase, Asserts):
def setUp(self):
super(SpawnAfterLocal, self).setUp()

View File

@@ -125,6 +125,7 @@ except AttributeError:
import keyword
import re
regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
def _isidentifier(string):
if string in keyword.kwlist:
return False
@@ -320,12 +321,16 @@ def _setup_func(funcopy, mock):
def assert_called_with(*args, **kwargs):
return mock.assert_called_with(*args, **kwargs)
def assert_called_once_with(*args, **kwargs):
return mock.assert_called_once_with(*args, **kwargs)
def assert_has_calls(*args, **kwargs):
return mock.assert_has_calls(*args, **kwargs)
def assert_any_call(*args, **kwargs):
return mock.assert_any_call(*args, **kwargs)
def reset_mock():
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
@@ -360,6 +365,7 @@ def _is_magic(name):
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
@@ -369,6 +375,7 @@ class _SentinelObject(object):
class _Sentinel(object):
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
@@ -413,11 +420,13 @@ _allowed_names = set(
def _delegating_property(name):
_allowed_names.add(name)
_the_name = '_mock_' + name
def _get(self, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
return getattr(self, _the_name)
return getattr(sig, name)
def _set(self, value, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
@@ -428,7 +437,6 @@ def _delegating_property(name):
return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
@@ -474,15 +482,14 @@ def _check_and_set_parent(parent, value, name, new_name):
return True
class Base(object):
_mock_return_value = DEFAULT
_mock_side_effect = None
def __init__(self, *args, **kwargs):
pass
class NonCallableMock(Base):
"""A non-callable version of `Mock`"""
@@ -494,7 +501,6 @@ class NonCallableMock(Base):
instance = object.__new__(new)
return instance
def __init__(
self, spec=None, wraps=None, name=None, spec_set=None,
parent=None, _spec_state=None, _new_name='', _new_parent=None,
@@ -535,7 +541,6 @@ class NonCallableMock(Base):
_spec_state
)
def attach_mock(self, mock, attribute):
"""
Attach a mock as an attribute of this one, replacing its name and
@@ -548,7 +553,6 @@ class NonCallableMock(Base):
setattr(self, attribute, mock)
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
@@ -557,7 +561,6 @@ class NonCallableMock(Base):
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
def _mock_add_spec(self, spec, spec_set):
_spec_class = None
@@ -574,7 +577,6 @@ class NonCallableMock(Base):
__dict__['_spec_set'] = spec_set
__dict__['_mock_methods'] = spec
def __get_return_value(self):
ret = self._mock_return_value
if self._mock_delegate is not None:
@@ -587,7 +589,6 @@ class NonCallableMock(Base):
self.return_value = ret
return ret
def __set_return_value(self, value):
if self._mock_delegate is not None:
self._mock_delegate.return_value = value
@@ -599,7 +600,6 @@ class NonCallableMock(Base):
return_value = property(__get_return_value, __set_return_value,
__return_value_doc)
@property
def __class__(self):
if self._spec_class is None:
@@ -612,7 +612,6 @@ class NonCallableMock(Base):
call_args_list = _delegating_property('call_args_list')
mock_calls = _delegating_property('mock_calls')
def __get_side_effect(self):
sig = self._mock_delegate
if sig is None:
@@ -629,7 +628,6 @@ class NonCallableMock(Base):
side_effect = property(__get_side_effect, __set_side_effect)
def reset_mock(self):
"Restore the mock object to its initial state."
self.called = False
@@ -648,7 +646,6 @@ class NonCallableMock(Base):
if _is_instance_mock(ret) and ret is not self:
ret.reset_mock()
def configure_mock(self, **kwargs):
"""Set attributes on the mock through keyword arguments.
@@ -670,7 +667,6 @@ class NonCallableMock(Base):
obj = getattr(obj, entry)
setattr(obj, final, val)
def __getattr__(self, name):
if name == '_mock_methods':
raise AttributeError(name)
@@ -705,7 +701,6 @@ class NonCallableMock(Base):
return result
def __repr__(self):
_name_list = [self._mock_new_name]
_parent = self._mock_new_parent
@@ -755,7 +750,6 @@ class NonCallableMock(Base):
id(self)
)
def __dir__(self):
"""Filter the output of `dir(mock)` to only useful members."""
extras = self._mock_methods or []
@@ -769,7 +763,6 @@ class NonCallableMock(Base):
return sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
def __setattr__(self, name, value):
if name in _allowed_names:
# property setters go through here
@@ -803,7 +796,6 @@ class NonCallableMock(Base):
self._mock_children[name] = value
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in _all_magics and name in type(self).__dict__:
delattr(type(self), name)
@@ -822,13 +814,10 @@ class NonCallableMock(Base):
del self._mock_children[name]
self._mock_children[name] = _deleted
def _format_mock_call_signature(self, args, kwargs):
name = self._mock_name or 'mock'
return _format_call_signature(name, args, kwargs)
def _format_mock_failure_message(self, args, kwargs):
message = 'Expected call: %s\nActual call: %s'
expected_string = self._format_mock_call_signature(args, kwargs)
@@ -838,7 +827,6 @@ class NonCallableMock(Base):
actual_string = self._format_mock_call_signature(*call_args)
return message % (expected_string, actual_string)
def assert_called_with(_mock_self, *args, **kwargs):
"""assert that the mock was called with the specified arguments.
@@ -853,7 +841,6 @@ class NonCallableMock(Base):
msg = self._format_mock_failure_message(args, kwargs)
raise AssertionError(msg)
def assert_called_once_with(_mock_self, *args, **kwargs):
"""assert that the mock was called exactly once and with the specified
arguments."""
@@ -864,7 +851,6 @@ class NonCallableMock(Base):
raise AssertionError(msg)
return self.assert_called_with(*args, **kwargs)
def assert_has_calls(self, calls, any_order=False):
"""assert the mock has been called with the specified calls.
The `mock_calls` list is checked for the calls.
@@ -896,7 +882,6 @@ class NonCallableMock(Base):
'%r not all found in call list' % (tuple(not_found),)
)
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
@@ -910,7 +895,6 @@ class NonCallableMock(Base):
'%s call not found' % expected_string
)
def _get_child_mock(self, **kw):
"""Create the child mocks for attributes and return value.
By default child mocks will be the same type as the parent.
@@ -930,7 +914,6 @@ class NonCallableMock(Base):
return klass(**kw)
def _try_iter(obj):
if obj is None:
return obj
@@ -946,7 +929,6 @@ def _try_iter(obj):
return obj
class CallableMixin(Base):
def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
@@ -961,19 +943,16 @@ class CallableMixin(Base):
self.side_effect = side_effect
def _mock_check_sig(self, *args, **kwargs):
# stub method that can be replaced with one with a specific signature
pass
def __call__(_mock_self, *args, **kwargs):
# can't use self in-case a function / method we are mocking uses self
# in the signature
_mock_self._mock_check_sig(*args, **kwargs)
return _mock_self._mock_call(*args, **kwargs)
def _mock_call(_mock_self, *args, **kwargs):
self = _mock_self
self.called = True
@@ -1046,7 +1025,6 @@ class CallableMixin(Base):
return ret_val
class Mock(CallableMixin, NonCallableMock):
"""
Create a new `Mock` object. `Mock` takes several optional arguments
@@ -1102,7 +1080,6 @@ class Mock(CallableMixin, NonCallableMock):
"""
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
@@ -1158,7 +1135,6 @@ class _patch(object):
self.kwargs = kwargs
self.additional_patchers = []
def copy(self):
patcher = _patch(
self.getter, self.attribute, self.new, self.spec,
@@ -1171,13 +1147,11 @@ class _patch(object):
]
return patcher
def __call__(self, func):
if isinstance(func, ClassTypes):
return self.decorate_class(func)
return self.decorate_callable(func)
def decorate_class(self, klass):
for attr in dir(klass):
if not attr.startswith(patch.TEST_PREFIX):
@@ -1191,7 +1165,6 @@ class _patch(object):
setattr(klass, attr, patcher(attr_value))
return klass
def decorate_callable(self, func):
if hasattr(func, 'patchings'):
func.patchings.append(self)
@@ -1241,7 +1214,6 @@ class _patch(object):
)
return patched
def get_original(self):
target = self.getter()
name = self.attribute
@@ -1262,7 +1234,6 @@ class _patch(object):
)
return original, local
def __enter__(self):
"""Perform the patch."""
new, spec, spec_set = self.new, self.spec, self.spec_set
@@ -1388,7 +1359,6 @@ class _patch(object):
return new
def __exit__(self, *exc_info):
"""Undo the patch."""
if not _is_started(self):
@@ -1409,21 +1379,18 @@ class _patch(object):
if _is_started(patcher):
patcher.__exit__(*exc_info)
def start(self):
"""Activate a patch, returning any created mock."""
result = self.__enter__()
self._active_patches.add(self)
return result
def stop(self):
"""Stop an active patch."""
self._active_patches.discard(self)
return self.__exit__()
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
@@ -1626,10 +1593,10 @@ class _patch_dict(object):
self.clear = clear
self._original = None
def __call__(self, f):
if isinstance(f, ClassTypes):
return self.decorate_class(f)
@wraps(f)
def _inner(*args, **kw):
self._patch_dict()
@@ -1640,7 +1607,6 @@ class _patch_dict(object):
return _inner
def decorate_class(self, klass):
for attr in dir(klass):
attr_value = getattr(klass, attr)
@@ -1651,12 +1617,10 @@ class _patch_dict(object):
setattr(klass, attr, decorated)
return klass
def __enter__(self):
"""Patch the dict."""
self._patch_dict()
def _patch_dict(self):
values = self.values
in_dict = self.in_dict
@@ -1682,7 +1646,6 @@ class _patch_dict(object):
for key in values:
in_dict[key] = values[key]
def _unpatch_dict(self):
in_dict = self.in_dict
original = self._original
@@ -1695,7 +1658,6 @@ class _patch_dict(object):
for key in original:
in_dict[key] = original[key]
def __exit__(self, *args):
"""Unpatch the dict."""
self._unpatch_dict()
@@ -1761,6 +1723,7 @@ _non_defaults = set('__%s__' % method for method in [
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
@@ -1816,6 +1779,7 @@ def _get_eq(self):
return self is other
return __eq__
def _get_ne(self):
def __ne__(other):
if self.__ne__._mock_return_value is not DEFAULT:
@@ -1823,6 +1787,7 @@ def _get_ne(self):
return self is not other
return __ne__
def _get_iter(self):
def __iter__():
ret_val = self.__iter__._mock_return_value
@@ -1840,7 +1805,6 @@ _side_effect_methods = {
}
def _set_return_value(mock, method, name):
fixed = _return_values.get(name, DEFAULT)
if fixed is not DEFAULT:
@@ -1863,13 +1827,11 @@ def _set_return_value(mock, method, name):
method.side_effect = side_effector(mock)
class MagicMixin(object):
def __init__(self, *args, **kw):
_super(MagicMixin, self).__init__(*args, **kw)
self._mock_set_magics()
def _mock_set_magics(self):
these_magics = _magics
@@ -1892,9 +1854,9 @@ class MagicMixin(object):
setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
"""A version of `MagicMock` that isn't callable."""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
@@ -1905,7 +1867,6 @@ class NonCallableMagicMock(MagicMixin, NonCallableMock):
self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
"""
MagicMock is a subclass of Mock with default implementations
@@ -1917,6 +1878,7 @@ class MagicMock(MagicMixin, Mock):
Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
"""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
@@ -1927,7 +1889,6 @@ class MagicMock(MagicMixin, Mock):
self._mock_set_magics()
class MagicProxy(object):
def __init__(self, name, parent):
self.name = name
@@ -1950,7 +1911,6 @@ class MagicProxy(object):
return self.create_mock()
class _ANY(object):
"A helper object that compares equal to everything."
@@ -1966,7 +1926,6 @@ class _ANY(object):
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
@@ -1984,7 +1943,6 @@ def _format_call_signature(name, args, kwargs):
return message % formatted_args
class _Call(tuple):
"""
A tuple for holding the results of a call to a mock, either in the form
@@ -2036,14 +1994,12 @@ class _Call(tuple):
return tuple.__new__(cls, (name, args, kwargs))
def __init__(self, value=(), name=None, parent=None, two=False,
from_kall=True):
self.name = name
self.parent = parent
self.from_kall = from_kall
def __eq__(self, other):
if other is ANY:
return True
@@ -2093,11 +2049,9 @@ class _Call(tuple):
# this order is important for ANY to work!
return (other_args, other_kwargs) == (self_args, self_kwargs)
def __ne__(self, other):
return not self.__eq__(other)
def __call__(self, *args, **kwargs):
if self.name is None:
return _Call(('', args, kwargs), name='()')
@@ -2105,14 +2059,12 @@ class _Call(tuple):
name = self.name + '()'
return _Call((self.name, args, kwargs), name=name, parent=self)
def __getattr__(self, attr):
if self.name is None:
return _Call(name=attr, from_kall=False)
name = '%s.%s' % (self.name, attr)
return _Call(name=name, parent=self, from_kall=False)
def __repr__(self):
if not self.from_kall:
name = self.name or 'call'
@@ -2133,7 +2085,6 @@ class _Call(tuple):
name = 'call%s' % name
return _format_call_signature(name, args, kwargs)
def call_list(self):
"""For a call object that represents multiple calls, `call_list`
returns a list of all the intermediate calls as well as the
@@ -2150,7 +2101,6 @@ class _Call(tuple):
call = _Call(from_kall=False)
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
_name=None, **kwargs):
"""Create a mock object using another object as a spec. Attributes on the
@@ -2377,10 +2327,12 @@ class PropertyMock(Mock):
Fetching a `PropertyMock` instance from an object calls the mock, with
no args. Setting it calls the mock with the value being set.
"""
def _get_child_mock(self, **kwargs):
return MagicMock(**kwargs)
def __get__(self, obj, obj_type):
return self()
def __set__(self, obj, val):
self(val)

View File

@@ -8,6 +8,7 @@ except ImportError:
import re
import glob
def parse_stdout(s):
argv = re.search('^===ARGV=(.*?)$', s, re.M).group(1)
argv = argv.split()
@@ -32,6 +33,7 @@ def parse_stdout(s):
unittest_delim = '----------------------------------------------------------------------'
def parse_unittest_output(s):
s = s[s.rindex(unittest_delim) + len(unittest_delim):]
num = int(re.search('^Ran (\d+) test.*?$', s, re.M).group(1))
@@ -52,6 +54,7 @@ def parse_unittest_output(s):
timeout = int(timeout_match.group(1))
return num, error, fail, timeout
def main(db):
c = sqlite3.connect(db)
c.execute('''create table if not exists parsed_command_record

View File

@@ -22,12 +22,12 @@ class TestIntPool(TestCase):
# If you do a get, you should ALWAYS do a put, probably like this:
# try:
# thing = self.pool.get()
# # do stuff
# do stuff
# finally:
# self.pool.put(thing)
# with self.pool.some_api_name() as thing:
# # do stuff
# do stuff
self.assertEqual(self.pool.get(), 1)
self.assertEqual(self.pool.get(), 2)
self.assertEqual(self.pool.get(), 3)
@@ -42,6 +42,7 @@ class TestIntPool(TestCase):
def test_exhaustion(self):
waiter = Queue(0)
def consumer():
gotten = None
try:
@@ -66,6 +67,7 @@ class TestIntPool(TestCase):
def test_blocks_on_pool(self):
waiter = Queue(0)
def greedy():
self.pool.get()
self.pool.get()
@@ -83,13 +85,13 @@ class TestIntPool(TestCase):
# no one should be waiting yet.
self.assertEqual(self.pool.waiting(), 0)
## Wait for greedy
# Wait for greedy
eventlet.sleep(0)
## Greedy should be blocking on the last get
# Greedy should be blocking on the last get
self.assertEqual(self.pool.waiting(), 1)
## Send will never be called, so balance should be 0.
# Send will never be called, so balance should be 0.
self.assertFalse(not waiter.full())
eventlet.kill(killable)
@@ -110,6 +112,7 @@ class TestIntPool(TestCase):
self.pool = IntPool(min_size=0, max_size=size)
queue = Queue()
results = []
def just_put(pool_item, index):
self.pool.put(pool_item)
queue.put(index)
@@ -142,6 +145,7 @@ class TestIntPool(TestCase):
def test_create_contention(self):
creates = [0]
def sleep_create():
creates[0] += 1
eventlet.sleep()
@@ -163,28 +167,31 @@ class TestIntPool(TestCase):
class TestAbstract(TestCase):
mode = 'static'
def test_abstract(self):
## Going for 100% coverage here
## A Pool cannot be used without overriding create()
# Going for 100% coverage here
# A Pool cannot be used without overriding create()
pool = pools.Pool()
self.assertRaises(NotImplementedError, pool.get)
class TestIntPool2(TestCase):
mode = 'static'
def setUp(self):
self.pool = IntPool(min_size=3, max_size=3)
def test_something(self):
self.assertEqual(len(self.pool.free_items), 3)
## Cover the clause in get where we get from the free list instead of creating
## an item on get
# Cover the clause in get where we get from the free list instead of creating
# an item on get
gotten = self.pool.get()
self.assertEqual(gotten, 1)
class TestOrderAsStack(TestCase):
mode = 'static'
def setUp(self):
self.pool = IntPool(max_size=3, order_as_stack=True)
@@ -204,6 +211,7 @@ class RaisePool(pools.Pool):
class TestCreateRaises(TestCase):
mode = 'static'
def setUp(self):
self.pool = RaisePool(max_size=3)
@@ -222,4 +230,3 @@ class TestTookTooLong(Exception):
if __name__ == '__main__':
main()
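The comment near the top of this test module spells out the get/put discipline; a standalone sketch of it, using an IntPool subclass like the one these tests define (the return statement in create() is an assumption, implied by the expected values 1, 2, 3):

    import eventlet
    from eventlet import pools

    class IntPool(pools.Pool):
        def create(self):
            # create() is the one method a Pool subclass must provide
            self.current_integer = getattr(self, 'current_integer', 0) + 1
            return self.current_integer

    pool = IntPool(max_size=3)

    # if you do a get, you should ALWAYS do a put:
    thing = pool.get()
    try:
        pass   # do stuff with thing
    finally:
        pool.put(thing)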

View File

@@ -6,6 +6,7 @@ warnings.simplefilter('ignore', DeprecationWarning)
from eventlet import processes, api
warnings.simplefilter('default', DeprecationWarning)
class TestEchoPool(LimitedTestCase):
def setUp(self):
super(TestEchoPool, self).setUp()

View File

@@ -1,6 +1,7 @@
from tests import LimitedTestCase, main
import eventlet
from eventlet import event
from eventlet import event, hubs, queue
from tests import LimitedTestCase, main
def do_bail(q):
eventlet.Timeout(0, RuntimeError())
@@ -10,6 +11,7 @@ def do_bail(q):
except RuntimeError:
return 'timed out'
class TestQueue(LimitedTestCase):
def test_send_first(self):
q = eventlet.Queue()
@@ -18,6 +20,7 @@ class TestQueue(LimitedTestCase):
def test_send_last(self):
q = eventlet.Queue()
def waiter(q):
self.assertEqual(q.get(), 'hi2')
@@ -51,6 +54,7 @@ class TestQueue(LimitedTestCase):
def test_zero_max_size(self):
q = eventlet.Queue(0)
def sender(evt, q):
q.put('hi')
evt.send('done')
@@ -70,6 +74,7 @@ class TestQueue(LimitedTestCase):
def test_resize_up(self):
q = eventlet.Queue(0)
def sender(evt, q):
q.put('hi')
evt.send('done')
@@ -84,7 +89,6 @@ class TestQueue(LimitedTestCase):
gt.wait()
def test_resize_down(self):
size = 5
q = eventlet.Queue(5)
for i in range(5):
@@ -97,6 +101,7 @@ class TestQueue(LimitedTestCase):
def test_resize_to_Unlimited(self):
q = eventlet.Queue(0)
def sender(evt, q):
q.put('hi')
evt.send('done')
@@ -115,8 +120,7 @@ class TestQueue(LimitedTestCase):
q = eventlet.Queue()
sendings = ['1', '2', '3', '4']
gts = [eventlet.spawn(q.get)
for x in sendings]
gts = [eventlet.spawn(q.get) for x in sendings]
eventlet.sleep(0.01) # get 'em all waiting
@@ -180,11 +184,12 @@ class TestQueue(LimitedTestCase):
def test_channel_send(self):
channel = eventlet.Queue(0)
events = []
def another_greenlet():
events.append(channel.get())
events.append(channel.get())
gt = eventlet.spawn(another_greenlet)
eventlet.spawn(another_greenlet)
events.append('sending')
channel.put('hello')
@@ -194,7 +199,6 @@ class TestQueue(LimitedTestCase):
self.assertEqual(['sending', 'hello', 'sent hello', 'world', 'sent world'], events)
def test_channel_wait(self):
channel = eventlet.Queue(0)
events = []
@@ -206,7 +210,7 @@ class TestQueue(LimitedTestCase):
channel.put('world')
events.append('sent world')
gt = eventlet.spawn(another_greenlet)
eventlet.spawn(another_greenlet)
events.append('waiting')
events.append(channel.get())
@@ -236,13 +240,11 @@ class TestQueue(LimitedTestCase):
self.assertEqual(results, [1, 2, 3])
def test_channel_sender_timing_out(self):
from eventlet import queue
c = eventlet.Queue(0)
self.assertRaises(queue.Full, c.put, "hi", timeout=0.001)
self.assertRaises(queue.Empty, c.get_nowait)
def test_task_done(self):
from eventlet import queue, debug
channel = queue.Queue(0)
X = object()
gt = eventlet.spawn(channel.put, X)
@@ -267,7 +269,6 @@ def store_result(result, func, *args):
class TestNoWait(LimitedTestCase):
def test_put_nowait_simple(self):
from eventlet import hubs,queue
hub = hubs.get_hub()
result = []
q = eventlet.Queue(1)
@@ -276,11 +277,10 @@ class TestNoWait(LimitedTestCase):
eventlet.sleep(0)
eventlet.sleep(0)
assert len(result) == 2, result
assert result[0]==None, result
assert result[0] is None, result
assert isinstance(result[1], queue.Full), result
def test_get_nowait_simple(self):
from eventlet import hubs,queue
hub = hubs.get_hub()
result = []
q = queue.Queue(1)
@@ -294,7 +294,6 @@ class TestNoWait(LimitedTestCase):
# get_nowait must work from the mainloop
def test_get_nowait_unlock(self):
from eventlet import hubs,queue
hub = hubs.get_hub()
result = []
q = queue.Queue(0)
@@ -316,11 +315,10 @@ class TestNoWait(LimitedTestCase):
# put_nowait must work from the mainloop
def test_put_nowait_unlock(self):
from eventlet import hubs,queue
hub = hubs.get_hub()
result = []
q = queue.Queue(0)
p = eventlet.spawn(q.get)
eventlet.spawn(q.get)
assert q.empty(), q
assert q.full(), q
eventlet.sleep(0)

View File

@@ -6,6 +6,7 @@ Many of these tests make connections to external servers, and all.py tries to sk
from eventlet import debug
debug.hub_prevent_multiple_readers(False)
def restart_hub():
from eventlet import hubs
hub = hubs.get_hub()
@@ -15,6 +16,7 @@ def restart_hub():
hub.abort()
hubs.use_hub(hub_shortname)
def assimilate_patched(name):
try:
modobj = __import__(name, globals(), locals(), ['test_main'])
@@ -26,6 +28,7 @@ def assimilate_patched(name):
method_name = name + "_test_main"
try:
test_method = modobj.test_main
def test_main():
restart_hub()
test_method()

View File

@@ -38,4 +38,3 @@ def get_modules():
print("Skipping network tests")
return test_modules

View File

@@ -3,6 +3,7 @@ eventlet.sleep(0)
from eventlet import patcher
patcher.monkey_patch()
def assimilate_real(name):
print("Assimilating", name)
try:

View File

@@ -1,7 +1,8 @@
from eventlet import patcher
from eventlet.green import SimpleHTTPServer
patcher.inject('test.test_SimpleHTTPServer',
patcher.inject(
'test.test_SimpleHTTPServer',
globals(),
('SimpleHTTPServer', SimpleHTTPServer))

View File

@@ -6,7 +6,8 @@ from eventlet.green import thread
from eventlet.green import threading
from eventlet.green import time
patcher.inject("test.test_asynchat",
patcher.inject(
"test.test_asynchat",
globals(),
('asyncore', asyncore),
('asynchat', asynchat),

View File

@@ -7,6 +7,7 @@ from eventlet.green import time
patcher.inject("test.test_asyncore", globals())
def new_closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map

View File

@@ -2,7 +2,8 @@ from eventlet import patcher
from eventlet.green import httplib
from eventlet.green import socket
patcher.inject('test.test_httplib',
patcher.inject(
'test.test_httplib',
globals(),
('httplib', httplib),
('socket', socket))

View File

@@ -7,7 +7,8 @@ from eventlet.green import urllib
from eventlet.green import httplib
from eventlet.green import threading
patcher.inject('test.test_httpservers',
patcher.inject(
'test.test_httpservers',
globals(),
('BaseHTTPServer', BaseHTTPServer),
('SimpleHTTPServer', SimpleHTTPServer),

View File

@@ -1,7 +1,8 @@
from eventlet import patcher
from eventlet.green import os
patcher.inject('test.test_os',
patcher.inject(
'test.test_os',
globals(),
('os', os))

View File

@@ -3,7 +3,8 @@ from eventlet.green import Queue
from eventlet.green import threading
from eventlet.green import time
patcher.inject('test.test_queue',
patcher.inject(
'test.test_queue',
globals(),
('Queue', Queue),
('threading', threading),

View File

@@ -2,7 +2,8 @@ from eventlet import patcher
from eventlet.green import select
patcher.inject('test.test_select',
patcher.inject(
'test.test_select',
globals(),
('select', select))

View File

@@ -7,7 +7,8 @@ from eventlet.green import time
from eventlet.green import thread
from eventlet.green import threading
patcher.inject('test.test_socket',
patcher.inject(
'test.test_socket',
globals(),
('socket', socket),
('select', select),

View File

@@ -6,6 +6,8 @@ from eventlet.green import socket
# enable network resource
import test.test_support
i_r_e = test.test_support.is_resource_enabled
def is_resource_enabled(resource):
if resource == 'network':
return True
@@ -24,6 +26,7 @@ patcher.inject('test.test_socket_ssl', globals())
test_basic = patcher.patch_function(test_basic)
test_rude_shutdown = patcher.patch_function(test_rude_shutdown)
def test_main():
if not hasattr(socket, "ssl"):
raise test_support.TestSkipped("socket module has no ssl support")

View File

@@ -11,7 +11,8 @@ from eventlet.green import threading
from test import test_support
test_support.use_resources = ['network']
patcher.inject('test.test_socketserver',
patcher.inject(
'test.test_socketserver',
globals(),
('SocketServer', SocketServer),
('socket', socket),

View File

@@ -12,6 +12,8 @@ from eventlet.green import urllib
# stupid test_support messing with our mojo
import test.test_support
i_r_e = test.test_support.is_resource_enabled
def is_resource_enabled(resource):
if resource == 'network':
return True
@@ -19,7 +21,8 @@ def is_resource_enabled(resource):
return i_r_e(resource)
test.test_support.is_resource_enabled = is_resource_enabled
patcher.inject('test.test_ssl',
patcher.inject(
'test.test_ssl',
globals(),
('asyncore', asyncore),
('BaseHTTPServer', BaseHTTPServer),

View File

@@ -2,7 +2,8 @@ from eventlet import patcher
from eventlet.green import subprocess
from eventlet.green import time
patcher.inject('test.test_subprocess',
patcher.inject(
'test.test_subprocess',
globals(),
('subprocess', subprocess),
('time', time))

View File

@@ -2,6 +2,7 @@
from eventlet import coros
from eventlet.green import thread
def allocate_lock():
return coros.semaphore(1, 9999)

View File

@@ -7,7 +7,8 @@ from eventlet.green import time
from eventlet import hubs
hubs.get_hub()
patcher.inject('test.test_threading_local',
patcher.inject(
'test.test_threading_local',
globals(),
('time', time),
('thread', thread),

View File

@@ -2,7 +2,8 @@ from eventlet import patcher
from eventlet.green import socket
from eventlet.green import time
patcher.inject('test.test_timeout',
patcher.inject(
'test.test_timeout',
globals(),
('socket', socket),
('time', time))

View File

@@ -2,7 +2,8 @@ from eventlet import patcher
from eventlet.green import httplib
from eventlet.green import urllib
patcher.inject('test.test_urllib',
patcher.inject(
'test.test_urllib',
globals(),
('httplib', httplib),
('urllib', urllib))

View File

@@ -2,7 +2,8 @@ from eventlet import patcher
from eventlet.green import socket
from eventlet.green import urllib2
patcher.inject('test.test_urllib2',
patcher.inject(
'test.test_urllib2',
globals(),
('socket', socket),
('urllib2', urllib2))

View File

@@ -5,7 +5,8 @@ from eventlet.green import threading
from eventlet.green import socket
from eventlet.green import urllib2
patcher.inject('test.test_urllib2_localnet',
patcher.inject(
'test.test_urllib2_localnet',
globals(),
('BaseHTTPServer', BaseHTTPServer),
('threading', threading),

View File

@@ -2,7 +2,6 @@ import eventlet
from eventlet.green import subprocess
import eventlet.patcher
from nose.plugins.skip import SkipTest
import os
import sys
import time
original_subprocess = eventlet.patcher.original('subprocess')
@@ -22,7 +21,7 @@ def test_subprocess_wait():
except subprocess.TimeoutExpired:
ok = True
tdiff = time.time() - t1
assert ok == True, 'did not raise subprocess.TimeoutExpired'
assert ok, 'did not raise subprocess.TimeoutExpired'
assert 0.1 <= tdiff <= 0.2, 'did not stop within allowed time'

View File

@@ -22,6 +22,7 @@ class TestQueue(LimitedTestCase):
@silence_warnings
def test_send_last(self):
q = coros.queue()
def waiter(q):
timer = eventlet.Timeout(0.1)
self.assertEqual(q.wait(), 'hi2')
@@ -57,6 +58,7 @@ class TestQueue(LimitedTestCase):
@silence_warnings
def test_zero_max_size(self):
q = coros.queue(0)
def sender(evt, q):
q.send('hi')
evt.send('done')
@@ -107,7 +109,6 @@ class TestQueue(LimitedTestCase):
except RuntimeError:
evt.send('timed out')
evt = Event()
spawn(do_receive, q, evt)
self.assertEqual(evt.wait(), 'timed out')
@@ -129,6 +130,7 @@ class TestQueue(LimitedTestCase):
def test_two_waiters_one_dies(self):
def waiter(q, evt):
evt.send(q.wait())
def do_receive(q, evt):
eventlet.Timeout(0, RuntimeError())
try:
@@ -209,7 +211,6 @@ class TestChannel(LimitedTestCase):
self.assertEqual(['sending', 'hello', 'sent hello', 'world', 'sent world'], events)
@silence_warnings
def test_wait(self):
sleep(0.1)

View File

@@ -6,6 +6,7 @@ from tests import LimitedTestCase
DELAY = 0.01
class TestEvent(LimitedTestCase):
def test_send_exc(self):

View File

@@ -14,6 +14,7 @@ class TestCoroutinePool(LimitedTestCase):
def test_execute_async(self):
done = _event.Event()
def some_work():
done.send()
pool = self.klass(0, 2)
@@ -22,6 +23,7 @@ class TestCoroutinePool(LimitedTestCase):
def test_execute(self):
value = 'return value'
def some_work():
return value
pool = self.klass(0, 2)
@@ -31,8 +33,10 @@ class TestCoroutinePool(LimitedTestCase):
def test_waiting(self):
pool = self.klass(0, 1)
done = _event.Event()
def consume():
done.wait()
def waiter(pool):
evt = pool.execute(consume)
evt.wait()
@@ -55,6 +59,7 @@ class TestCoroutinePool(LimitedTestCase):
def test_multiple_coros(self):
evt = _event.Event()
results = []
def producer():
results.append('prod')
evt.send()
@@ -74,8 +79,10 @@ class TestCoroutinePool(LimitedTestCase):
# this test verifies that local timers are not fired
# outside of the context of the execute method
timer_fired = []
def fire_timer():
timer_fired.append(True)
def some_work():
hubs.get_hub().schedule_call_local(0, fire_timer)
pool = self.klass(0, 2)
@@ -86,6 +93,7 @@ class TestCoroutinePool(LimitedTestCase):
def test_reentrant(self):
pool = self.klass(0, 1)
def reenter():
waiter = pool.execute(lambda a: a, 'reenter')
self.assertEqual('reenter', waiter.wait())
@@ -94,6 +102,7 @@ class TestCoroutinePool(LimitedTestCase):
outer_waiter.wait()
evt = _event.Event()
def reenter_async():
pool.execute_async(lambda a: a, 'reenter')
evt.send('done')
@@ -127,6 +136,7 @@ class TestCoroutinePool(LimitedTestCase):
def test_resize(self):
pool = self.klass(max_size=2)
evt = _event.Event()
def wait_long_time(e):
e.wait()
pool.execute(wait_long_time, evt)
@@ -157,8 +167,10 @@ class TestCoroutinePool(LimitedTestCase):
# any members
import sys
pool = self.klass(min_size=1, max_size=1)
def crash(*args, **kw):
raise RuntimeError("Whoa")
class FakeFile(object):
write = crash
@@ -195,6 +207,7 @@ class TestCoroutinePool(LimitedTestCase):
def test_track_slow_event(self):
pool = self.klass(track_events=True)
def slow():
api.sleep(0.1)
return 'ok'
@@ -209,6 +222,7 @@ class TestCoroutinePool(LimitedTestCase):
pool = self.klass(min_size=1, max_size=1)
tp = pools.TokenPool(max_size=1)
token = tp.get() # empty pool
def do_receive(tp):
timeout.Timeout(0, RuntimeError())
try:
@@ -245,6 +259,7 @@ class PoolBasicTests(LimitedTestCase):
p = self.klass(max_size=2)
self.assertEqual(p.free(), 2)
r = []
def foo(a):
r.append(a)
evt = p.execute(foo, 1)
@@ -276,6 +291,7 @@ class PoolBasicTests(LimitedTestCase):
def test_with_intpool(self):
from eventlet import pools
class IntPool(pools.Pool):
def create(self):
self.current_integer = getattr(self, 'current_integer', 0) + 1
@@ -303,4 +319,3 @@ class PoolBasicTests(LimitedTestCase):
if __name__ == '__main__':
main()

View File

@@ -11,9 +11,11 @@ from tests import LimitedTestCase, skipped, silence_warnings
DELAY = 0.01
class ExpectedError(Exception):
pass
class TestLink_Signal(LimitedTestCase):
@silence_warnings
@@ -97,6 +99,7 @@ class TestCase(LimitedTestCase):
self.link(p, event)
proc_flag = []
def receiver():
sleep(DELAY)
proc_flag.append('finished')
@@ -128,6 +131,7 @@ class TestCase(LimitedTestCase):
link(event)
proc_finished_flag = []
def myproc():
sleep(10)
proc_finished_flag.append('finished')
@@ -185,6 +189,7 @@ class TestReturn_link(TestCase):
self.check_timed_out(*xxxxx)
class TestReturn_link_value(TestReturn_link):
sync = False
link_method = 'link_value'
@@ -257,6 +262,7 @@ class TestRaise_link(TestCase):
for _ in range(3):
self._test_kill(p, False, proc.LinkedKilled)
class TestRaise_link_exception(TestRaise_link):
link_method = 'link_exception'
@@ -315,12 +321,15 @@ class TestStuff(LimitedTestCase):
# manually that they are
p = proc.spawn(lambda: 5)
results = []
def listener1(*args):
results.append(10)
raise ExpectedError('listener1')
def listener2(*args):
results.append(20)
raise ExpectedError('listener2')
def listener3(*args):
raise ExpectedError('listener3')
p.link(listener1)
@@ -341,14 +350,17 @@ class TestStuff(LimitedTestCase):
# notification must not happen after unlink even
# though notification process has been already started
results = []
def listener1(*args):
p.unlink(listener2)
results.append(5)
raise ExpectedError('listener1')
def listener2(*args):
p.unlink(listener1)
results.append(5)
raise ExpectedError('listener2')
def listener3(*args):
raise ExpectedError('listener3')
p.link(listener1)
@@ -368,6 +380,7 @@ class TestStuff(LimitedTestCase):
def test_killing_unlinked(self):
e = _event.Event()
def func():
try:
raise ExpectedError('test_killing_unlinked')

View File

@@ -3,6 +3,7 @@ import socket as _original_sock
from eventlet import api
from eventlet.green import socket
class TestSocketErrors(unittest.TestCase):
def test_connection_refused(self):
# open and close a dummy server to find an unused port

View File

@@ -9,6 +9,7 @@ try:
except ImportError:
pass
class Test(unittest.TestCase):
@requires_twisted
def test_block_on_success(self):

View File

@@ -14,6 +14,7 @@ except ImportError:
pr.UnbufferedTransport = None
pr.GreenTransport = None
pr.GreenClientCreator = lambda *a, **k: None
class reactor(object):
pass
@@ -35,6 +36,7 @@ if socket is not None:
port = s.getsockname()[1]
s.listen(5)
s.settimeout(delay * 3)
def serve():
conn, addr = s.accept()
conn.settimeout(delay + 1)
@@ -50,6 +52,7 @@ if socket is not None:
spawn(serve)
return port
def setup_server_SpawnFactory(self, delay=DELAY, port=0):
def handle(conn):
port.stopListening()
@@ -65,6 +68,7 @@ def setup_server_SpawnFactory(self, delay=DELAY, port=0):
port = reactor.listenTCP(0, pr.SpawnFactory(handle, LineOnlyReceiverTransport))
return port.getHost().port
class TestCase(unittest.TestCase):
transportBufferSize = None
@@ -79,6 +83,7 @@ class TestCase(unittest.TestCase):
if self.transportBufferSize is not None:
self.assertEqual(self.transportBufferSize, self.conn.transport.bufferSize)
class TestUnbufferedTransport(TestCase):
gtransportClass = pr.UnbufferedTransport
setup_server = setup_server_SpawnFactory
@@ -95,10 +100,12 @@ class TestUnbufferedTransport(TestCase):
self.conn.write('iterator\r\n')
self.assertEqual('you said iterator. BYE', ''.join(self.conn))
class TestUnbufferedTransport_bufsize1(TestUnbufferedTransport):
transportBufferSize = 1
setup_server = setup_server_SpawnFactory
class TestGreenTransport(TestUnbufferedTransport):
gtransportClass = pr.GreenTransport
setup_server = setup_server_SpawnFactory
@@ -149,6 +156,7 @@ class TestGreenTransport(TestUnbufferedTransport):
result = with_timeout(DELAY * 10, self.conn.read, timeout_value='timed out')
self.assertEqual('you said hi. BYE', result)
class TestGreenTransport_bufsize1(TestGreenTransport):
transportBufferSize = 1
@@ -162,7 +170,7 @@ class TestGreenTransport_bufsize1(TestGreenTransport):
# try:
# 1//0
# except:
# #self.conn.loseConnection(failure.Failure()) # does not work, why?
# self.conn.loseConnection(failure.Failure()) # does not work, why?
# spawn(self.conn._queue.send_exception, *sys.exc_info())
# self.assertEqual(self.conn.read(9), 'you said ')
# self.assertEqual(self.conn.read(7), 'hello. ')
@@ -178,7 +186,7 @@ class TestGreenTransport_bufsize1(TestGreenTransport):
# try:
# 1//0
# except:
# #self.conn.loseConnection(failure.Failure()) # does not work, why?
# self.conn.loseConnection(failure.Failure()) # does not work, why?
# spawn(self.conn._queue.send_exception, *sys.exc_info())
# self.assertEqual('BYE', self.conn.recv())
# self.assertRaises(ZeroDivisionError, self.conn.recv, 9)
@@ -212,6 +220,7 @@ class TestTLSError(unittest.TestCase):
from gnutls.errors import GNUTLSError
cred = X509Credentials(None, None)
ev = Event()
def handle(conn):
ev.send("handle must not be called")
s = reactor.listenTLS(0, pr.SpawnFactory(handle, LineOnlyReceiverTransport), cred)
@@ -227,6 +236,7 @@ try:
except ImportError:
del TestTLSError
@requires_twisted
def main():
unittest.main()

View File

@@ -1,53 +1,55 @@
import eventlet
from tests import LimitedTestCase
from eventlet import timeout
from eventlet import greenthread
DELAY = 0.01
class TestDirectRaise(LimitedTestCase):
def test_direct_raise_class(self):
try:
raise timeout.Timeout
except timeout.Timeout as t:
raise eventlet.Timeout
except eventlet.Timeout as t:
assert not t.pending, repr(t)
def test_direct_raise_instance(self):
tm = timeout.Timeout()
tm = eventlet.Timeout()
try:
raise tm
except timeout.Timeout as t:
except eventlet.Timeout as t:
assert tm is t, (tm, t)
assert not t.pending, repr(t)
def test_repr(self):
# just verify these don't crash
tm = timeout.Timeout(1)
greenthread.sleep(0)
tm = eventlet.Timeout(1)
eventlet.sleep(0)
repr(tm)
str(tm)
tm.cancel()
tm = timeout.Timeout(None, RuntimeError)
tm = eventlet.Timeout(None, RuntimeError)
repr(tm)
str(tm)
tm = timeout.Timeout(None, False)
tm = eventlet.Timeout(None, False)
repr(tm)
str(tm)
class TestWithTimeout(LimitedTestCase):
def test_with_timeout(self):
self.assertRaises(timeout.Timeout, timeout.with_timeout, DELAY, greenthread.sleep, DELAY*10)
self.assertRaises(eventlet.Timeout, eventlet.with_timeout, DELAY, eventlet.sleep, DELAY * 10)
X = object()
r = timeout.with_timeout(DELAY, greenthread.sleep, DELAY*10, timeout_value=X)
r = eventlet.with_timeout(DELAY, eventlet.sleep, DELAY * 10, timeout_value=X)
assert r is X, (r, X)
r = timeout.with_timeout(DELAY*10, greenthread.sleep,
DELAY, timeout_value=X)
r = eventlet.with_timeout(DELAY * 10, eventlet.sleep, DELAY, timeout_value=X)
assert r is None, r
def test_with_outer_timer(self):
def longer_timeout():
# this should not catch the outer timeout's exception
return timeout.with_timeout(DELAY * 10,
greenthread.sleep, DELAY * 20,
timeout_value='b')
self.assertRaises(timeout.Timeout,
timeout.with_timeout, DELAY, longer_timeout)
return eventlet.with_timeout(DELAY * 10, eventlet.sleep, DELAY * 20, timeout_value='b')
self.assertRaises(
eventlet.Timeout,
eventlet.with_timeout,
DELAY, longer_timeout)
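The renamed helpers these tests now go through, in one small usage sketch (delays are placeholders):

    import eventlet

    # with_timeout: call a function, but give up after the deadline;
    # timeout_value is returned instead of raising eventlet.Timeout
    r = eventlet.with_timeout(0.01, eventlet.sleep, 1, timeout_value='timed out')
    print(r)   # 'timed out'

    # Timeout can also guard a block; passing False makes the expiry silent
    with eventlet.Timeout(0.01, False):
        eventlet.sleep(1)
    print('continued after the silent timeout')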

Some files were not shown because too many files have changed in this diff.