Merge
@@ -13,3 +13,6 @@ cover
nosetests*.xml
.coverage
*,cover

syntax: re
^.ropeproject/.*$

@@ -1,5 +1,4 @@
import collections
import time
import traceback
import warnings

@@ -148,7 +147,8 @@ class Queue(object):
return len(self.items)

def __repr__(self):
params = (self.__class__.__name__, hex(id(self)), len(self.items), len(self._waiters))
params = (self.__class__.__name__, hex(id(self)),
len(self.items), len(self._waiters))
return '<%s at %s items[%d] _waiters[%s]>' % params

def send(self, result=None, exc=None):
@@ -221,7 +221,9 @@ class Channel(object):
return len(self.items)

def __repr__(self):
params = (self.__class__.__name__, hex(id(self)), self.max_size, len(self.items), len(self._waiters), len(self._senders))
params = (self.__class__.__name__, hex(id(self)),
self.max_size, len(self.items),
len(self._waiters), len(self._senders))
return '<%s at %s max=%s items[%d] _w[%s] _s[%s]>' % params

def send(self, result=None, exc=None):
@@ -397,4 +399,3 @@ class Actor(object):
>>> eventlet.kill(a._killer) # test cleanup
"""
raise NotImplementedError()

@@ -4,6 +4,7 @@ from eventlet.green import SimpleHTTPServer
from eventlet.green import urllib
from eventlet.green import select

test = None # bind prior to patcher.inject to silence pyflakes warning below
patcher.inject('CGIHTTPServer',
globals(),
('BaseHTTPServer', BaseHTTPServer),
@@ -14,4 +15,4 @@ patcher.inject('CGIHTTPServer',
del patcher

if __name__ == '__main__':
test()
test() # pyflakes false alarm here unless test = None above
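
A condensed sketch of the idiom the hunk above relies on (simplified and assumed, not the full module): the name is bound before patcher.inject() overwrites it via globals(), so pyflakes sees a definition for the call at the bottom.

    from eventlet import patcher
    from eventlet.green import BaseHTTPServer

    test = None  # placeholder; inject() below replaces it with the real test()
    patcher.inject('CGIHTTPServer', globals(),
                   ('BaseHTTPServer', BaseHTTPServer))
    del patcher

    if __name__ == '__main__':
        test()  # without the placeholder binding, pyflakes flags this as undefined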
@@ -21,6 +21,3 @@ def _patch_main_thread(mod):
curthread = mod._active.pop(mod._get_ident(), None)
if curthread:
mod._active[thread.get_ident()] = curthread

if __name__ == '__main__':
_test()

@@ -3,3 +3,4 @@ for var in dir(__time):
exec "%s = __time.%s" % (var, var)
__patched__ = ['sleep']
from eventlet.greenthread import sleep
sleep # silence pyflakes

@@ -1,7 +1,4 @@
import eventlet
from eventlet.hubs import trampoline
from eventlet.hubs import get_hub

BUFFER_SIZE = 4096

import errno
@@ -12,10 +9,6 @@ import sys
import time
import warnings


from errno import EWOULDBLOCK, EAGAIN


__all__ = ['GreenSocket', 'GreenPipe', 'shutdown_safe']

CONNECT_ERR = set((errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK))
@@ -195,11 +188,11 @@ class GreenSocket(object):
else:
end = time.time() + self.gettimeout()
while True:
if socket_connect(fd, address):
return 0
if time.time() >= end:
raise socket.timeout(errno.EAGAIN)
try:
if socket_connect(fd, address):
return 0
if time.time() >= end:
raise socket.timeout(errno.EAGAIN)
trampoline(fd, write=True, timeout=end-time.time(),
timeout_exc=socket.timeout(errno.EAGAIN))
except socket.error, ex:
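
For reference, a rough stdlib-only sketch of the deadline pattern the rewritten connect() follows (select.select stands in for eventlet's trampoline here; this is an illustration, not eventlet's code):

    import errno, select, socket, time

    def connect_with_deadline(sock, address, timeout):
        sock.setblocking(0)
        end = time.time() + timeout
        while True:
            err = sock.connect_ex(address)
            if err in (0, errno.EISCONN):
                return 0
            if time.time() >= end:
                raise socket.timeout(errno.EAGAIN)
            # wait until the socket is writable or the remaining time is used up
            select.select([], [sock], [], max(0, end - time.time()))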
@@ -302,7 +295,6 @@ class GreenSocket(object):
return total_sent

def sendall(self, data, flags=0):
fd = self.fd
tail = self.send(data, flags)
len_data = len(data)
while tail < len_data:
@@ -375,7 +367,7 @@ class GreenPipe(object):
try:
return fd.read(buflen)
except IOError, e:
if e[0] != EAGAIN:
if e[0] != errno.EAGAIN:
return ''
except socket.error, e:
if e[0] == errno.EPIPE:
@@ -407,7 +399,7 @@ class GreenPipe(object):
fd.flush()
return len(data)
except IOError, e:
if e[0] != EAGAIN:
if e[0] != errno.EAGAIN:
raise
except ValueError, e:
# what's this for?
@@ -589,4 +581,3 @@ def serve(sock, handle, concurrency=1000):
connections until the existing ones complete.
"""
pass

@@ -30,11 +30,12 @@ class DebugListener(FdListener):
self.greenlet = greenlet.getcurrent()
super(DebugListener, self).__init__(evtype, fileno, cb)
def __repr__(self):
return "DebugListener(%r, %r, %r, %r)\n%sEndDebugFdListener" % (self.evtype,
self.fileno,
self.cb,
self.greenlet,
''.join(self.where_called))
return "DebugListener(%r, %r, %r, %r)\n%sEndDebugFdListener" % (
self.evtype,
self.fileno,
self.cb,
self.greenlet,
''.join(self.where_called))
__str__ = __repr__


@@ -231,8 +232,6 @@ class BaseHub(object):
t = self.timers
heappop = heapq.heappop

i = 0

while t:
next = t[0]

@@ -43,7 +43,9 @@ class Hub(BaseHub):
event.init()

self.signal_exc_info = None
self.signal(2, lambda signalnum, frame: self.greenlet.parent.throw(KeyboardInterrupt))
self.signal(
2,
lambda signalnum, frame: self.greenlet.parent.throw(KeyboardInterrupt))
self.events_to_add = []

def dispatch(self):
@@ -76,7 +78,8 @@ class Hub(BaseHub):
raise
except:
if self.signal_exc_info is not None:
self.schedule_call_global(0, greenlet.getcurrent().parent.throw, *self.signal_exc_info)
self.schedule_call_global(
0, greenlet.getcurrent().parent.throw, *self.signal_exc_info)
self.signal_exc_info = None
else:
self.squelch_timer_exception(None, sys.exc_info())
@@ -128,7 +131,7 @@ class Hub(BaseHub):
for listener in l_list:
try:
listener.cb.delete()
except SYSTEM_EXCEPTIONS:
except self.SYSTEM_EXCEPTIONS:
raise
except:
traceback.print_exc()
@@ -169,4 +172,3 @@ def _scheduled_call_local(event_impl, handle, evtype, arg):
cb(*args, **kwargs)
finally:
event_impl.delete()

@@ -74,8 +74,8 @@ class socket_rwdescriptor(FdListener):
# to the mainloop occurs, twisted will not re-evaluate the delayed calls
# because it assumes that none were scheduled since no client code was executed
# (it has no idea it was switched away). So, we restart the mainloop.
# XXX this is not enough, pollreactor prints the traceback for this and epollreactor
# times out. see test__hub.TestCloseSocketWhilePolling
# XXX this is not enough, pollreactor prints the traceback for
# this and epollreactor times out. see test__hub.TestCloseSocketWhilePolling
raise greenlet.GreenletExit

logstr = "twistedr"
@@ -103,9 +103,10 @@ class BaseTwistedHub(object):
self.greenlet = mainloop_greenlet

def switch(self):
assert api.getcurrent() is not self.greenlet, "Cannot switch from MAINLOOP to MAINLOOP"
assert getcurrent() is not self.greenlet, \
"Cannot switch from MAINLOOP to MAINLOOP"
try:
api.getcurrent().parent = self.greenlet
getcurrent().parent = self.greenlet
except ValueError:
pass
return self.greenlet.switch()
@@ -134,7 +135,8 @@ class BaseTwistedHub(object):
if timer.greenlet.dead:
return
return func(*args1, **kwargs1)
timer = callLater(LocalDelayedCall, reactor, seconds, call_if_greenlet_alive, *args, **kwargs)
timer = callLater(LocalDelayedCall, reactor, seconds,
call_if_greenlet_alive, *args, **kwargs)
return timer

schedule_call = schedule_call_local
@@ -189,18 +191,22 @@ class TwistedHub(BaseTwistedHub):
installSignalHandlers = False

def __init__(self):
assert Hub.state==0, ('%s hub can only be instantiated once' % type(self).__name__, Hub.state)
assert Hub.state==0, ('%s hub can only be instantiated once'%type(self).__name__,
Hub.state)
Hub.state = 1
make_twisted_threadpool_daemonic() # otherwise the program would hang after the main greenlet exited
g = api.Greenlet(self.run)
make_twisted_threadpool_daemonic() # otherwise the program
# would hang after the main
# greenlet exited
g = greenlet.greenlet(self.run)
BaseTwistedHub.__init__(self, g)

def switch(self):
assert api.getcurrent() is not self.greenlet, "Cannot switch from MAINLOOP to MAINLOOP"
assert getcurrent() is not self.greenlet, \
"Cannot switch from MAINLOOP to MAINLOOP"
if self.greenlet.dead:
self.greenlet = api.Greenlet(self.run)
self.greenlet = greenlet.greenlet(self.run)
try:
api.getcurrent().parent = self.greenlet
getcurrent().parent = self.greenlet
except ValueError:
pass
return self.greenlet.switch()
@@ -255,5 +261,3 @@ def make_twisted_threadpool_daemonic():
from twisted.python.threadpool import ThreadPool
if ThreadPool.threadFactory != DaemonicThread:
ThreadPool.threadFactory = DaemonicThread

@@ -35,7 +35,8 @@ def inject(module_name, new_globals, *additional_modules):
saved[name] = sys.modules.get(name, None)
sys.modules[name] = mod

## Remove the old module from sys.modules and reimport it while the specified modules are in place
## Remove the old module from sys.modules and reimport it while
## the specified modules are in place
old_module = sys.modules.pop(module_name, None)
try:
module = __import__(module_name, {}, {}, module_name.split('.')[:-1])
@@ -73,9 +74,9 @@ def import_patched(module_name, *additional_modules, **kw_additional_modules):
The only required argument is the name of the module to be imported.
"""
return inject(
module_name,
None,
*additional_modules + tuple(kw_additional_modules.items()))
module_name,
None,
*additional_modules + tuple(kw_additional_modules.items()))


def patch_function(func, *additional_modules):
@@ -159,11 +160,12 @@ def monkey_patch(all=True, os=False, select=False,

for name, mod in modules_to_patch:
orig_mod = sys.modules.get(name)
for attr in mod.__patched__:
orig_attr = getattr(orig_mod, attr, None)
patched_attr = getattr(mod, attr, None)
for attr_name in mod.__patched__:
#orig_attr = getattr(orig_mod, attr_name, None)
# @@tavis: line above wasn't used, not sure what author intended
patched_attr = getattr(mod, attr_name, None)
if patched_attr is not None:
setattr(orig_mod, attr, patched_attr)
setattr(orig_mod, attr_name, patched_attr)

def _green_os_modules():
from eventlet.green import os
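
The renamed loop boils down to copying each name listed in a green module's __patched__ onto the original stdlib module. A minimal standalone sketch (illustrative function and variable names, not patcher's actual API):

    import sys

    def copy_patched_attributes(name, green_mod):
        # overwrite the original module's attributes with their green versions
        orig_mod = sys.modules.get(name)
        for attr_name in getattr(green_mod, '__patched__', []):
            patched_attr = getattr(green_mod, attr_name, None)
            if patched_attr is not None:
                setattr(orig_mod, attr_name, patched_attr)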
@@ -165,5 +165,3 @@ class TokenPool(Pool):
"""
def create(self):
return Token()

@@ -11,7 +11,6 @@ import popen2
import signal

from eventlet import api
from eventlet import coros
from eventlet import pools
from eventlet import greenio

@@ -69,7 +68,9 @@ class Process(object):
greenio.set_nonblocking(child_stdout_stderr)
greenio.set_nonblocking(child_stdin)
self.child_stdout_stderr = greenio.GreenPipe(child_stdout_stderr)
self.child_stdout_stderr.newlines = '\n' # the default is \r\n, which aren't sent over pipes
self.child_stdout_stderr.newlines = '\n' # the default is
# \r\n, which aren't sent over
# pipes
self.child_stdin = greenio.GreenPipe(child_stdin)
self.child_stdin.newlines = '\n'

@@ -36,7 +36,10 @@ def wrap(obj, dead_callback = None):
return wrap_module(obj.__name__, dead_callback)
pythonpath_sync()
if _g_debug_mode:
p = Process(sys.executable, ["-W", "ignore", __file__, '--child', '--logfile', os.path.join(tempfile.gettempdir(), 'saranwrap.log')], dead_callback)
p = Process(sys.executable,
["-W", "ignore", __file__, '--child',
'--logfile', os.path.join(tempfile.gettempdir(), 'saranwrap.log')],
dead_callback)
else:
p = Process(sys.executable, ["-W", "ignore", __file__, '--child'], dead_callback)
prox = Proxy(ChildProcess(p, p))
@@ -53,9 +56,13 @@ def wrap_module(fqname, dead_callback = None):
pythonpath_sync()
global _g_debug_mode
if _g_debug_mode:
p = Process(sys.executable, ["-W", "ignore", __file__, '--module', fqname, '--logfile', os.path.join(tempfile.gettempdir(), 'saranwrap.log')], dead_callback)
p = Process(sys.executable,
["-W", "ignore", __file__, '--module', fqname,
'--logfile', os.path.join(tempfile.gettempdir(), 'saranwrap.log')],
dead_callback)
else:
p = Process(sys.executable, ["-W", "ignore", __file__, '--module', fqname,], dead_callback)
p = Process(sys.executable,
["-W", "ignore", __file__, '--module', fqname,], dead_callback)
prox = Proxy(ChildProcess(p,p))
return prox

@@ -140,7 +147,8 @@ def _write_request(param, output):

def _is_local(attribute):
"Return ``True`` if the attribute should be handled locally"
# return attribute in ('_in', '_out', '_id', '__getattribute__', '__setattr__', '__dict__')
# return attribute in ('_in', '_out', '_id', '__getattribute__',
# '__setattr__', '__dict__')
# good enough for now. :)
if '__local_dict' in attribute:
return True
@@ -266,7 +274,8 @@ class Proxy(object):
my_cp = self.__local_dict['_cp']
my_id = self.__local_dict['_id']
# Pass the set attribute across
request = Request('setattr', {'id':my_id, 'attribute':attribute, 'value':value})
request = Request('setattr',
{'id':my_id, 'attribute':attribute, 'value':value})
return my_cp.make_request(request, attribute=attribute)

class ObjectProxy(Proxy):
@@ -324,7 +333,8 @@ class ObjectProxy(Proxy):
return self.__str__()

def __nonzero__(self):
# bool(obj) is another method that skips __getattribute__. There's no good way to just pass
# bool(obj) is another method that skips __getattribute__.
# There's no good way to just pass
# the method on, so we use a special message.
my_cp = self.__local_dict['_cp']
my_id = self.__local_dict['_id']
@@ -395,7 +405,9 @@ class CallableProxy(object):
# having already checked if the method starts with '_' so we
# can safely pass this one to the remote object.
#_prnt("calling %s %s" % (self._object_id, self._name)
request = Request('call', {'id':self._object_id, 'name':self._name, 'args':args, 'kwargs':kwargs})
request = Request('call', {'id':self._object_id,
'name':self._name,
'args':args, 'kwargs':kwargs})
return self._cp.make_request(request, attribute=self._name)

class Server(object):
@@ -444,14 +456,15 @@ class Server(object):

def handle_setitem(self, obj, req):
obj[req['key']] = req['value']
return None # *TODO figure out what the actual return value of __setitem__ should be
return None # *TODO figure out what the actual return value
# of __setitem__ should be

def handle_eq(self, obj, req):
#_log("__eq__ %s %s" % (obj, req))
rhs = None
try:
rhs = self._objects[req['rhs']]
except KeyError, e:
except KeyError:
return False
return (obj == rhs)

@@ -565,7 +578,7 @@ class Server(object):
#_log("objects: %s" % self._objects)
s = Pickle.dumps(body)
_log(`s`)
str_ = _write_lp_hunk(self._out, s)
_write_lp_hunk(self._out, s)

def write_exception(self, e):
"""Helper method to respond with an exception."""
@@ -621,14 +634,16 @@ def named(name):
import_err_strings.append(err.__str__())
toimport = '.'.join(toimport.split('.')[:-1])
if obj is None:
raise ImportError('%s could not be imported. Import errors: %r' % (name, import_err_strings))
raise ImportError(
'%s could not be imported. Import errors: %r' % (name, import_err_strings))
for seg in name.split('.')[1:]:
try:
obj = getattr(obj, seg)
except AttributeError:
dirobj = dir(obj)
dirobj.sort()
raise AttributeError('attribute %r missing from %r (%r) %r. Import errors: %r' % (
raise AttributeError(
'attribute %r missing from %r (%r) %r. Import errors: %r' % (
seg, obj, dirobj, name, import_err_strings))
return obj

@@ -17,5 +17,6 @@ except ImportError, e:
except ImportError:
try:
from support.stacklesss import greenlet, getcurrent, GreenletExit
(greenlet, getcurrent, GreenletExit) # silence pyflakes
except ImportError, e:
raise ImportError("Unable to find an implementation of greenlet.")

@@ -30,7 +30,7 @@ class FirstSwitch(object):
gr.t = t
tasklet_to_greenlet[t] = gr
t.setup(*args, **kw)
result = t.run()
t.run()


class greenlet(object):
@@ -75,10 +75,10 @@ def emulate():
module.getcurrent = getcurrent
module.GreenletExit = GreenletExit

caller = t = stackless.getcurrent()
tasklet_to_greenlet[t] = None
caller = stackless.getcurrent()
tasklet_to_greenlet[caller] = None
main_coro = greenlet()
tasklet_to_greenlet[t] = main_coro
main_coro.t = t
tasklet_to_greenlet[caller] = main_coro
main_coro.t = caller
del main_coro.switch ## It's already running
coro_args[main_coro] = None

@@ -74,9 +74,13 @@ def tworker():
rv = meth(*args,**kwargs)
except SYS_EXCS:
raise
except Exception,exn:
except Exception:
rv = sys.exc_info()
_rspq.put((e,rv))
_rspq.put((e,rv)) # @@tavis: not supposed to
# keep references to
# sys.exc_info() so it would
# be worthwhile testing
# if this leads to memory leaks
meth = args = kwargs = e = rv = None
_signal_t2e()
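
The @@tavis note above reflects the usual caution with sys.exc_info(): holding the traceback keeps its frames (and everything they reference) alive. A small sketch of the pattern, with illustrative names rather than tpool's real ones:

    import sys

    def run_and_report(rspq, token, meth, args, kwargs):
        try:
            rv = meth(*args, **kwargs)
        except Exception:
            rv = sys.exc_info()  # (type, value, traceback) for the caller to re-raise
        rspq.put((token, rv))
        # drop local references promptly so the traceback's frames can be collected
        meth = args = kwargs = token = rv = None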
@@ -4,10 +4,10 @@ You generally don't have to use it unless you need to call reactor.run()
yourself.
"""
from eventlet.hubs.twistedr import BaseTwistedHub
from eventlet.api import use_hub, _threadlocal
from eventlet import use_hub
from eventlet.support import greenlets as greenlet
from eventlet.hubs import _threadlocal

use_hub(BaseTwistedHub)
assert not hasattr(_threadlocal, 'hub')
hub = _threadlocal.hub = _threadlocal.Hub(greenlet.getcurrent())

@@ -1,12 +1,11 @@
import os
import socket
import errno
import warnings

from eventlet import greenio

def g_log(*args):
warnings.warn("eventlet.util.g_log is deprecated because we're pretty sure no one uses it. Send mail to eventletdev@lists.secondlife.com if you are actually using it.",
warnings.warn("eventlet.util.g_log is deprecated because "
"we're pretty sure no one uses it. "
"Send mail to eventletdev@lists.secondlife.com "
"if you are actually using it.",
DeprecationWarning, stacklevel=2)
import sys
from eventlet.support import greenlets as greenlet
@@ -49,7 +48,8 @@ except ImportError:
try:
from eventlet.green.OpenSSL import SSL
except ImportError:
raise ImportError("To use SSL with Eventlet, you must install PyOpenSSL or use Python 2.6 or later.")
raise ImportError("To use SSL with Eventlet, "
"you must install PyOpenSSL or use Python 2.6 or later.")
context = SSL.Context(SSL.SSLv23_METHOD)
if certificate is not None:
context.use_certificate_file(certificate)
@@ -126,4 +126,3 @@ def set_reuse_addr(descriptor):
descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1)
except socket.error:
pass

@@ -15,7 +15,8 @@ DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024
DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1'
MAX_REQUEST_LINE = 8192
MINIMUM_CHUNK_SIZE = 4096
DEFAULT_LOG_FORMAT='%(client_ip)s - - [%(date_time)s] "%(request_line)s" %(status_code)s %(body_length)s %(wall_seconds).6f'
DEFAULT_LOG_FORMAT= ('%(client_ip)s - - [%(date_time)s] "%(request_line)s"'
' %(status_code)s %(body_length)s %(wall_seconds).6f')

__all__ = ['server', 'format_date_time']
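
Rendered with a sample dict (field names taken from the format string above; the values here are made up), the reformatted DEFAULT_LOG_FORMAT still produces the same one-line access log entry:

    sample = {'client_ip': '127.0.0.1', 'date_time': '01/Jan/2010 00:00:00',
              'request_line': 'GET / HTTP/1.1', 'status_code': 200,
              'body_length': 1024, 'wall_seconds': 0.001234}
    print DEFAULT_LOG_FORMAT % sample
    # 127.0.0.1 - - [01/Jan/2010 00:00:00] "GET / HTTP/1.1" 200 1024 0.001234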
@@ -173,7 +174,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
self.raw_requestline = self.rfile.readline(MAX_REQUEST_LINE)
if len(self.raw_requestline) == MAX_REQUEST_LINE:
self.wfile.write(
"HTTP/1.0 414 Request URI Too Long\r\nConnection: close\r\nContent-length: 0\r\n\r\n")
"HTTP/1.0 414 Request URI Too Long\r\n"
"Connection: close\r\nContent-length: 0\r\n\r\n")
self.close_connection = 1
return
except greenio.SSL.ZeroReturnError:
@@ -277,7 +279,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
_writelines(towrite)
length[0] = length[0] + sum(map(len, towrite))
except UnicodeEncodeError:
print "Encountered unicode while attempting to write wsgi response: ", [x for x in towrite if isinstance(x, unicode)]
print "Encountered unicode while attempting to write wsgi response: ", \
[x for x in towrite if isinstance(x, unicode)]
traceback.print_exc()
_writelines(
["HTTP/1.0 500 Internal Server Error\r\n",
@@ -285,7 +288,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
"Content-type: text/plain\r\n",
"Content-length: 98\r\n",
"\r\n",
"Internal Server Error: wsgi application passed a unicode object to the server instead of a string."])
("Internal Server Error: wsgi application passed "
"a unicode object to the server instead of a string.")])

def start_response(status, response_headers, exc_info=None):
status_code[0] = status.split()[0]
@@ -298,7 +302,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
# Avoid dangling circular ref
exc_info = None

capitalized_headers = [('-'.join([x.capitalize() for x in key.split('-')]), value)
capitalized_headers = [('-'.join([x.capitalize()
for x in key.split('-')]), value)
for key, value in response_headers]

headers_set[:] = [status, capitalized_headers]
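
The wrapped comprehension does the same header-name normalization as before; a small illustration of its effect:

    response_headers = [('content-type', 'text/plain'), ('x-my-header', '1')]
    capitalized_headers = [('-'.join([x.capitalize()
                                      for x in key.split('-')]), value)
                           for key, value in response_headers]
    # [('Content-Type', 'text/plain'), ('X-My-Header', '1')]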
@@ -329,17 +334,19 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
write(''.join(towrite))
if not headers_sent or (use_chunked[0] and just_written_size):
write('')
except Exception, e:
except Exception:
self.close_connection = 1
exc = traceback.format_exc()
print exc
if not headers_set:
start_response("500 Internal Server Error", [('Content-type', 'text/plain')])
start_response("500 Internal Server Error",
[('Content-type', 'text/plain')])
write(exc)
finally:
if hasattr(result, 'close'):
result.close()
if self.environ['eventlet.input'].position < self.environ.get('CONTENT_LENGTH', 0):
if (self.environ['eventlet.input'].position
< self.environ.get('CONTENT_LENGTH', 0)):
## Read and discard body if there was no pending 100-continue
if not self.environ['eventlet.input'].wfile:
while self.environ['eventlet.input'].read(MINIMUM_CHUNK_SIZE):
@@ -454,7 +461,6 @@ class Server(BaseHTTPServer.HTTPServer):
self.log_format = log_format

def get_environ(self):
socket = self.socket
d = {
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
@@ -543,7 +549,8 @@ def server(sock, site,
if port == ':80':
port = ''

serv.log.write("(%s) wsgi starting up on %s://%s%s/\n" % (os.getpid(), scheme, host, port))
serv.log.write("(%s) wsgi starting up on %s://%s%s/\n" % (
os.getpid(), scheme, host, port))
while True:
try:
client_socket = sock.accept()
@@ -572,4 +579,3 @@ def server(sock, site,
except socket.error, e:
if get_errno(e) not in BROKEN_SOCK:
traceback.print_exc()

@@ -1,11 +1,14 @@
"Test cases for db_pool"
import sys
import os
import traceback
from unittest import TestCase, main

from tests import skipped, skip_unless, skip_with_pyevent
from unittest import TestCase, main
from eventlet import event
from eventlet import db_pool
import eventlet
import os


class DBTester(object):
__test__ = False # so that nose doesn't try to execute this directly
@@ -26,7 +29,7 @@ class DBTester(object):
self.connection.close()
self.drop_db()

def set_up_dummy_table(self, connection = None):
def set_up_dummy_table(self, connection=None):
close_connection = False
if connection is None:
close_connection = True
@@ -84,7 +87,7 @@ class DBConnectionPool(DBTester):
self.assert_(False)
except AssertionError:
raise
except Exception, e:
except Exception:
pass
cursor.close()

@@ -144,7 +147,6 @@ class DBConnectionPool(DBTester):
curs.execute(SHORT_QUERY)
results.append(2)
evt.send()
evt2 = event.Event()
eventlet.spawn(a_query)
results.append(1)
self.assertEqual([1], results)
@@ -299,7 +301,10 @@ class DBConnectionPool(DBTester):

@skipped
def test_max_idle(self):
# This test is timing-sensitive. Rename the function without the "dont" to run it, but beware that it could fail or take a while.
# This test is timing-sensitive. Rename the function without
# the "dont" to run it, but beware that it could fail or take
# a while.

self.pool = self.create_pool(max_size=2, max_idle=0.02)
self.connection = self.pool.get()
self.connection.close()
@@ -319,7 +324,10 @@ class DBConnectionPool(DBTester):

@skipped
def test_max_idle_many(self):
# This test is timing-sensitive. Rename the function without the "dont" to run it, but beware that it could fail or take a while.
# This test is timing-sensitive. Rename the function without
# the "dont" to run it, but beware that it could fail or take
# a while.

self.pool = self.create_pool(max_size=2, max_idle=0.02)
self.connection, conn2 = self.pool.get(), self.pool.get()
self.connection.close()
@@ -332,7 +340,10 @@ class DBConnectionPool(DBTester):

@skipped
def test_max_age(self):
# This test is timing-sensitive. Rename the function without the "dont" to run it, but beware that it could fail or take a while.
# This test is timing-sensitive. Rename the function without
# the "dont" to run it, but beware that it could fail or take
# a while.

self.pool = self.create_pool(max_size=2, max_age=0.05)
self.connection = self.pool.get()
self.connection.close()
@@ -347,7 +358,10 @@ class DBConnectionPool(DBTester):

@skipped
def test_max_age_many(self):
# This test is timing-sensitive. Rename the function without the "dont" to run it, but beware that it could fail or take a while.
# This test is timing-sensitive. Rename the function without
# the "dont" to run it, but beware that it could fail or take
# a while.

self.pool = self.create_pool(max_size=2, max_age=0.15)
self.connection, conn2 = self.pool.get(), self.pool.get()
self.connection.close()
@@ -424,7 +438,8 @@ class RaisingDBModule(object):

class TpoolConnectionPool(DBConnectionPool):
__test__ = False # so that nose doesn't try to execute this directly
def create_pool(self, max_size = 1, max_idle = 10, max_age = 10, connect_timeout=0.5, module=None):
def create_pool(self, max_size=1, max_idle=10, max_age=10,
connect_timeout=0.5, module=None):
if module is None:
module = self._dbmodule
return db_pool.TpooledConnectionPool(module,
@@ -447,7 +462,8 @@ class TpoolConnectionPool(DBConnectionPool):

class RawConnectionPool(DBConnectionPool):
__test__ = False # so that nose doesn't try to execute this directly
def create_pool(self, max_size = 1, max_idle = 10, max_age = 10, connect_timeout= 0.5, module=None):
def create_pool(self, max_size=1, max_idle=10, max_age=10,
connect_timeout=0.5, module=None):
if module is None:
module = self._dbmodule
return db_pool.RawConnectionPool(module,
@@ -458,8 +474,9 @@ class RawConnectionPool(DBConnectionPool):


def get_auth():
"""Looks in the local directory and in the user's home directory for a file named ".test_dbauth",
which contains a json map of parameters to the connect function.
"""Looks in the local directory and in the user's home directory
for a file named ".test_dbauth", which contains a json map of
parameters to the connect function.
"""
files = [os.path.join(os.path.dirname(__file__), '.test_dbauth'),
os.path.join(os.path.expanduser('~'), '.test_dbauth')]
@@ -473,13 +490,14 @@ def get_auth():
return dict([(str(modname), dict([(str(k), str(v))
for k, v in connectargs.items()]))
for modname, connectargs in auth_utf8.items()])
except (IOError, ImportError), e:
except (IOError, ImportError):
pass
return {'MySQLdb':{'host': 'localhost','user': 'root','passwd': ''},
'psycopg2':{'user':'test'}}


def mysql_requirement(_f):
verbose = os.environ.get('eventlet_test_mysql_verbose')
try:
import MySQLdb
try:
@@ -487,12 +505,13 @@ def mysql_requirement(_f):
MySQLdb.connect(**auth)
return True
except MySQLdb.OperationalError:
print "Skipping mysql tests, error when connecting"
import traceback
traceback.print_exc()
if verbose:
print >> sys.stderr, ">> Skipping mysql tests, error when connecting:"
traceback.print_exc()
return False
except ImportError:
print "Skipping mysql tests, MySQLdb not importable"
if verbose:
print >> sys.stderr, ">> Skipping mysql tests, MySQLdb not importable"
return False

class MysqlConnectionPool(object):
@@ -600,7 +619,6 @@ class Psycopg2ConnectionPool(object):

def drop_db(self):
auth = self._auth.copy()
dbname = auth.pop('database')
conn = self._dbmodule.connect(**auth)
conn.set_isolation_level(0)
db = conn.cursor()

@@ -40,7 +40,7 @@ class TestGreenIo(LimitedTestCase):
self.assertEqual(e.args[0], 'timed out')
except socket.error, e:
# unreachable is also a valid outcome
if e[0] != errno.EHOSTUNREACH:
if not e[0] in (errno.EHOSTUNREACH, errno.ENETUNREACH):
raise

def test_accept_timeout(self):
@@ -62,7 +62,8 @@ class TestGreenIo(LimitedTestCase):
s.settimeout(0.1)
gs = greenio.GreenSocket(s)
e = gs.connect_ex(('192.0.2.1', 80))
self.assertEquals(e, errno.EAGAIN)
if not e in (errno.EHOSTUNREACH, errno.ENETUNREACH):
self.assertEquals(e, errno.EAGAIN)

def test_recv_timeout(self):
listener = greenio.GreenSocket(socket.socket())