From 03f38ec9f025b163bc9ac389ba194d528bb56a30 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Thu, 23 Aug 2007 19:57:41 -0400 Subject: [PATCH 02/79] [svn r3] Moved eventlet into trunk. --- README | 38 +++ eventlet/__init__.py | 24 ++ eventlet/api.py | 305 ++++++++++++++++++++++ eventlet/api_test.py | 164 ++++++++++++ eventlet/backdoor.py | 85 ++++++ eventlet/channel.py | 98 +++++++ eventlet/coros.py | 167 ++++++++++++ eventlet/coros_test.py | 144 ++++++++++ eventlet/greenlib.py | 323 +++++++++++++++++++++++ eventlet/httpc.py | 334 ++++++++++++++++++++++++ eventlet/httpd.py | 491 +++++++++++++++++++++++++++++++++++ eventlet/httpd_test.py | 139 ++++++++++ eventlet/httpdate.py | 39 +++ eventlet/jsonhttp.py | 31 +++ eventlet/kqueuehub.py | 219 ++++++++++++++++ eventlet/logutil.py | 112 ++++++++ eventlet/pollhub.py | 189 ++++++++++++++ eventlet/pools.py | 174 +++++++++++++ eventlet/pools_test.py | 179 +++++++++++++ eventlet/processes.py | 138 ++++++++++ eventlet/processes_test.py | 134 ++++++++++ eventlet/pylibsupport.py | 42 +++ eventlet/runloop.py | 200 ++++++++++++++ eventlet/runloop_test.py | 157 +++++++++++ eventlet/selecthub.py | 173 ++++++++++++ eventlet/stacklesssupport.py | 110 ++++++++ eventlet/tests.py | 36 +++ eventlet/timer.py | 72 +++++ eventlet/timer_test.py | 66 +++++ eventlet/tls.py | 57 ++++ eventlet/twistedsupport.py | 134 ++++++++++ eventlet/util.py | 214 +++++++++++++++ eventlet/wrappedfd.py | 262 +++++++++++++++++++ eventlet/wsgi.py | 219 ++++++++++++++++ examples/echoserver.py | 52 ++++ examples/webcrawler.py | 55 ++++ 36 files changed, 5376 insertions(+) create mode 100644 README create mode 100644 eventlet/__init__.py create mode 100644 eventlet/api.py create mode 100644 eventlet/api_test.py create mode 100644 eventlet/backdoor.py create mode 100644 eventlet/channel.py create mode 100644 eventlet/coros.py create mode 100644 eventlet/coros_test.py create mode 100644 eventlet/greenlib.py create mode 100755 eventlet/httpc.py create mode 100644 eventlet/httpd.py create mode 100644 eventlet/httpd_test.py create mode 100644 eventlet/httpdate.py create mode 100644 eventlet/jsonhttp.py create mode 100644 eventlet/kqueuehub.py create mode 100644 eventlet/logutil.py create mode 100644 eventlet/pollhub.py create mode 100644 eventlet/pools.py create mode 100644 eventlet/pools_test.py create mode 100644 eventlet/processes.py create mode 100644 eventlet/processes_test.py create mode 100644 eventlet/pylibsupport.py create mode 100644 eventlet/runloop.py create mode 100644 eventlet/runloop_test.py create mode 100644 eventlet/selecthub.py create mode 100644 eventlet/stacklesssupport.py create mode 100644 eventlet/tests.py create mode 100644 eventlet/timer.py create mode 100644 eventlet/timer_test.py create mode 100644 eventlet/tls.py create mode 100644 eventlet/twistedsupport.py create mode 100644 eventlet/util.py create mode 100644 eventlet/wrappedfd.py create mode 100644 eventlet/wsgi.py create mode 100644 examples/echoserver.py create mode 100644 examples/webcrawler.py diff --git a/README b/README new file mode 100644 index 0000000..3bf8701 --- /dev/null +++ b/README @@ -0,0 +1,38 @@ += eventlet = + +Eventlet is a networking library written in Python. It achieves high +scalability by using non-blocking io while at the same time retaining +high programmer usability by using coroutines to make the non-blocking +io operations appear blocking at the source code level. 
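+
+For example, a coroutine written against eventlet reads like ordinary blocking
+socket code, even though the hub is switching between many coroutines behind
+the scenes. A minimal sketch using the api module added later in this patch
+(the fetch function and the address are illustrative placeholders; see also
+examples/webcrawler.py):
+
+    from eventlet import api
+
+    def fetch(address):
+        conn = api.connect_tcp(address)        # looks blocking, but cooperatively yields to the hub
+        conn.write('GET / HTTP/1.0\r\n\r\n')
+        print conn.read()                      # resumes when the response data arrives
+
+    api.spawn(fetch, ('www.example.com', 80))
+    api.sleep(2)                               # run the hub so the spawned coroutine can finish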
+
+== requirements ==
+
+Eventlet runs on Python version 2.3 or greater, with the following dependencies:
+* [http://cheeseshop.python.org/pypi/greenlet greenlet]
+* (if running Python versions < 2.4) collections.py from the 2.4 distribution or later
+
+== limitations ==
+
+* Sorely lacking in documentation
+* Not enough test coverage -- the goal is 100%, but we are not there yet.
+* Eventlet does not currently run on stackless using tasklets, though it is a goal to do so in the future.
+* The SSL client does not properly connect to the SSL server, though both client and server interoperate with other SSL implementations (e.g. curl and apache).
+
+== getting started ==
+
+% python
+>>> import eventlet.api
+>>> help(eventlet.api)
+
+Also, look at the examples in the examples directory.
+
+== eventlet history ==
+
+eventlet began life when Donovan Preston was talking to Bob Ippolito about coroutine-based non-blocking networking frameworks in Python. Most non-blocking frameworks require you to run the "main loop" in order to perform all network operations, but Donovan wondered if a library written using a trampolining style could get away with transparently running the main loop any time i/o was required, stopping the main loop once no more i/o was scheduled. Bob spent a few days during PyCon 2005 writing a proof-of-concept. He named it eventlet, after the coroutine implementation it used, [[greenlet]]. Donovan began using eventlet as a light-weight network library for his spare-time project Pavel, and also began writing some unit tests.
+
+* http://svn.red-bean.com/bob/eventlet/trunk/
+* http://soundfarmer.com/Pavel/trunk/
+
+When Donovan started at Linden Lab in May of 2006, he added eventlet as an svn external in the indra/lib/python directory, to be a dependency of the yet-to-be-named [[backbone]] project (at the time, it was named restserv). However, including eventlet as an svn external meant that any time the externally hosted project had hosting issues, Linden developers were not able to perform svn updates. Thus, the eventlet source was imported into the Linden source tree at the same location, and became a fork.
+
+Bob Ippolito has ceased working on eventlet and has stated his desire for Linden to take its fork forward to the open source world as "the" eventlet.
diff --git a/eventlet/__init__.py b/eventlet/__init__.py
new file mode 100644
index 0000000..5021bea
--- /dev/null
+++ b/eventlet/__init__.py
@@ -0,0 +1,24 @@
+"""\
+@file __init__.py
+Copyright (c) 2007, Linden Research, Inc.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +# This text exists only for the purpose of not making a complete +# mockery of the above copyright header. diff --git a/eventlet/api.py b/eventlet/api.py new file mode 100644 index 0000000..b2e3c53 --- /dev/null +++ b/eventlet/api.py @@ -0,0 +1,305 @@ +"""\ +@file api.py +@author Bob Ippolito + +Copyright (c) 2005-2006, Bob Ippolito +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import sys +import socket +import string +import linecache +import inspect +import traceback + +try: + import greenlet +except ImportError: + try: + import pylibsupport + pylibsupport.emulate() + greenlet = sys.modules['greenlet'] + except ImportError: + import stacklesssupport + stacklesssupport.emulate() + greenlet = sys.modules['greenlet'] + +from eventlet import greenlib, tls + +__all__ = [ + 'use_hub', 'get_hub', 'sleep', 'spawn', 'kill', + 'call_after', 'exc_after', 'trampoline', 'tcp_listener', 'tcp_server', +] + + +class TimeoutError(Exception): + pass + +_threadlocal = tls.local() + +def tcp_listener(address): + """ + Listen on the given (ip, port) address with a TCP socket. + Returns a socket object which one should call accept() on to + accept a connection on the newly bound socket. + + Generally, the returned socket will be passed to tcp_server, + which accepts connections forever and spawns greenlets for + each incoming connection. + """ + from eventlet import wrappedfd, util + socket = wrappedfd.wrapped_fd(util.tcp_socket()) + util.socket_bind_and_listen(socket, address) + return socket + +def ssl_listener(address, certificate, private_key): + """Listen on the given (ip, port) address with a TCP socket that + can do SSL. + + Returns a socket object which one should call accept() on to + accept a connection on the newly bound socket. + + Generally, the returned socket will be passed to tcp_server, + which accepts connections forever and spawns greenlets for + each incoming connection. + """ + from eventlet import util + socket = util.wrap_ssl(util.tcp_socket(), certificate, private_key) + util.socket_bind_and_listen(socket, address) + socket.is_secure = True + return socket + +def connect_tcp(address): + """ + Create a TCP connection to address (host, port) and return the socket. 
+ """ + from eventlet import wrappedfd, util + desc = wrappedfd.wrapped_fd(util.tcp_socket()) + desc.connect(address) + return desc + +def tcp_server(listensocket, server, *args, **kw): + """ + Given a socket, accept connections forever, spawning greenlets + and executing "server" for each new incoming connection. + When listensocket is closed, the tcp_server greenlet will end. + + listensocket: + The socket to accept connections from. + + server: + The callable to call when a new connection is made. + + *args: + The arguments to pass to the call to server. + + **kw: + The keyword arguments to pass to the call to server. + """ + try: + try: + while True: + spawn(server, listensocket.accept(), *args, **kw) + except socket.error, e: + # Broken pipe means it was shutdown + if e[0] != 32: + raise + finally: + listensocket.close() + +def trampoline(fd, read=None, write=None, timeout=None): + t = None + hub = get_hub() + self = greenlet.getcurrent() + fileno = getattr(fd, 'fileno', lambda: fd)() + def _do_close(fn): + hub.remove_descriptor(fn) + greenlib.switch(self, exc=socket.error(32, 'Broken pipe')) + def _do_timeout(fn): + hub.remove_descriptor(fn) + greenlib.switch(self, exc=TimeoutError()) + def cb(_fileno): + if t is not None: + t.cancel() + hub.remove_descriptor(fileno) + greenlib.switch(self, fd) + if timeout is not None: + t = hub.schedule_call(timeout, _do_timeout) + hub.add_descriptor(fileno, read and cb, write and cb, _do_close) + return hub.switch() + +def _spawn_startup(cb, args, kw, cancel=None): + try: + greenlib.switch(greenlet.getcurrent().parent) + cancel = None + finally: + if cancel is not None: + cancel() + return cb(*args, **kw) + +def _spawn(g): + g.parent = greenlet.getcurrent() + greenlib.switch(g) + + +def spawn(cb, *args, **kw): + # killable + t = None + g = greenlib.tracked_greenlet() + t = get_hub().schedule_call(0, _spawn, g) + greenlib.switch(g, (_spawn_startup, cb, args, kw, t.cancel)) + return g + +kill = greenlib.kill + +def call_after(seconds, cb, *args, **kw): + # cancellable + def startup(): + g = greenlib.tracked_greenlet() + greenlib.switch(g, (_spawn_startup, cb, args, kw)) + greenlib.switch(g) + return get_hub().schedule_call(seconds, startup) + + +def exc_after(seconds, exc): + return call_after(seconds, switch, getcurrent(), None, exc) + + +def get_default_hub(): + try: + import eventlet.kqueuehub + except ImportError: + pass + else: + return eventlet.kqueuehub + import select + if hasattr(select, 'poll'): + import eventlet.pollhub + return eventlet.pollhub + else: + import eventlet.selecthub + return eventlet.selecthub + +def use_hub(mod=None): + if mod is None: + mod = get_default_hub() + if hasattr(_threadlocal, 'hub'): + del _threadlocal.hub + if hasattr(mod, 'Hub'): + _threadlocal.Hub = mod.Hub + else: + _threadlocal.Hub = mod + +def get_hub(): + try: + hub = _threadlocal.hub + except AttributeError: + try: + _threadlocal.Hub + except AttributeError: + use_hub() + hub = _threadlocal.hub = _threadlocal.Hub() + return hub + + +def sleep(timeout=0): + hub = get_hub() + hub.schedule_call(timeout, greenlib.switch, greenlet.getcurrent()) + hub.switch() + + +switch = greenlib.switch +getcurrent = greenlet.getcurrent + + +class Spew(object): + def __init__(self, trace_names=None): + self.trace_names = trace_names + + def __call__(self, frame, event, arg): + if event == 'line': + lineno = frame.f_lineno + if '__file__' in frame.f_globals: + filename = frame.f_globals['__file__'] + if (filename.endswith('.pyc') or + filename.endswith('.pyo')): + filename 
= filename[:-1] + name = frame.f_globals['__name__'] + line = linecache.getline(filename, lineno) + else: + name = '[unknown]' + try: + src = inspect.getsourcelines(frame) + line = src[lineno] + except IOError: + line = 'Unknown code named [%s]. VM instruction #%d' % ( + frame.f_code.co_name, frame.f_lasti) + if self.trace_names is None or name in self.trace_names: + print '%s:%s: %s' % (name, lineno, line.rstrip()) + details = '\t' + tokens = line.translate( + string.maketrans(' ,.()', '\0' * 5)).split('\0') + for tok in tokens: + if tok in frame.f_globals: + details += '%s=%r ' % (tok, frame.f_globals[tok]) + if tok in frame.f_locals: + details += '%s=%r ' % (tok, frame.f_locals[tok]) + if details.strip(): + print details + return self + + +def spew(trace_names=None): + sys.settrace(Spew(trace_names)) + + +def unspew(): + sys.settrace(None) + + +def named(name): + """Return an object given its name. The name uses a module-like +syntax, eg: + os.path.join + or + mulib.mu.Resource + """ + toimport = name + obj = None + while toimport: + try: + obj = __import__(toimport) + break + except ImportError, err: + # print 'Import error on %s: %s' % (toimport, err) # debugging spam + toimport = '.'.join(toimport.split('.')[:-1]) + if obj is None: + raise ImportError('%s could not be imported' % (name, )) + for seg in name.split('.')[1:]: + try: + obj = getattr(obj, seg) + except AttributeError: + dirobj = dir(obj) + dirobj.sort() + raise AttributeError('attribute %r missing from %r (%r) %r' % ( + seg, obj, dirobj, name)) + return obj + diff --git a/eventlet/api_test.py b/eventlet/api_test.py new file mode 100644 index 0000000..b2c371a --- /dev/null +++ b/eventlet/api_test.py @@ -0,0 +1,164 @@ +"""\ +@file api_test.py +@author Donovan Preston + +Copyright (c) 2006-2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +from eventlet import tests +from eventlet import api, wrappedfd, util +import socket + + +def check_hub(): + # Clear through the descriptor queue + api.sleep(0) + api.sleep(0) + assert not api.get_hub().descriptors, repr(api.get_hub().descriptors) + # Stop the runloop + api.get_hub().runloop.abort() + api.sleep(0) + assert not api.get_hub().runloop.running + + +class TestApi(tests.TestCase): + mode = 'static' + def test_tcp_listener(self): + socket = api.tcp_listener(('0.0.0.0', 0)) + assert socket.getsockname()[0] == '0.0.0.0' + socket.close() + + check_hub() + + def dont_test_connect_tcp(self): + """This test is broken. 
Please name it test_connect_tcp and fix + the bug (or the test) so it passes. + """ + def accept_once(listenfd): + try: + conn, addr = listenfd.accept() + conn.write('hello\n') + conn.close() + finally: + listenfd.close() + + server = api.tcp_listener(('0.0.0.0', 0)) + api.spawn(accept_once, server) + + client = api.connect_tcp(('127.0.0.1', server.getsockname()[1])) + assert client.readline() == 'hello\n' + + assert client.read() == '' + client.close() + + check_hub() + + def test_server(self): + server = api.tcp_listener(('0.0.0.0', 0)) + bound_port = server.getsockname()[1] + connected = [] + + def accept_twice((conn, addr)): + print 'connected' + connected.append(True) + conn.close() + if len(connected) == 2: + server.close() + + api.call_after(0, api.connect_tcp, ('127.0.0.1', bound_port)) + api.call_after(0, api.connect_tcp, ('127.0.0.1', bound_port)) + api.tcp_server(server, accept_twice) + + assert len(connected) == 2 + + check_hub() + + def dont_test_trampoline_timeout(self): + """This test is broken. Please change it's name to test_trampoline_timeout, + and fix the bug (or fix the test) + """ + server = api.tcp_listener(('0.0.0.0', 0)) + bound_port = server.getsockname()[1] + + try: + desc = wrappedfd.wrapped_fd(util.tcp_socket()) + api.trampoline(desc, read=True, write=True, timeout=0.1) + except api.TimeoutError: + pass # test passed + else: + assert False, "Didn't timeout" + + check_hub() + + def test_timeout_cancel(self): + server = api.tcp_listener(('0.0.0.0', 0)) + bound_port = server.getsockname()[1] + + def client_connected((conn, addr)): + conn.close() + + def go(): + client = util.tcp_socket() + + desc = wrappedfd.wrapped_fd(client) + desc.connect(('127.0.0.1', bound_port)) + try: + api.trampoline(desc, read=True, write=True, timeout=0.1) + except api.TimeoutError: + assert False, "Timed out" + + server.close() + client.close() + + api.call_after(0, go) + + api.tcp_server(server, client_connected) + + check_hub() + + def dont_test_explicit_hub(self): + """This test is broken. please change it's name to test_explicit_hub + and make it pass (or fix the test) + """ + api.use_hub(Foo) + assert isinstance(api.get_hub(), Foo), api.get_hub() + + api.use_hub(api.get_default_hub()) + + check_hub() + + def test_named(self): + named_foo = api.named('api_test.Foo') + self.assertEquals( + named_foo.__name__, + "Foo") + + def test_naming_missing_class(self): + self.assertRaises( + ImportError, api.named, 'this_name_should_hopefully_not_exist.Foo') + + +class Foo(object): + pass + + +if __name__ == '__main__': + tests.main() + diff --git a/eventlet/backdoor.py b/eventlet/backdoor.py new file mode 100644 index 0000000..8f792dd --- /dev/null +++ b/eventlet/backdoor.py @@ -0,0 +1,85 @@ +"""\ +@file backdoor.py +@author Bob Ippolito + +Copyright (c) 2005-2006, Bob Ippolito +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import sys +from code import InteractiveConsole +from eventlet import greenlib + +try: + sys.ps1 +except AttributeError: + sys.ps1 = '>>> ' +try: + sys.ps2 +except AttributeError: + sys.ps2 = '... ' + +class SocketConsole(greenlib.GreenletContext): + def __init__(self, desc): + # mangle the socket + self.desc = desc + readline = desc.readline + self.old = {} + self.fixups = { + 'softspace': 0, + 'isatty': lambda: True, + 'flush': lambda: None, + 'readline': lambda *a: readline(*a).replace('\r\n', '\n'), + } + for key, value in self.fixups.iteritems(): + if hasattr(desc, key): + self.old[key] = getattr(desc, key) + setattr(desc, key, value) + + def finalize(self): + # restore the state of the socket + for key in self.fixups: + try: + value = self.old[key] + except KeyError: + delattr(self.desc, key) + else: + setattr(self.desc, key, value) + self.fixups.clear() + self.old.clear() + self.desc = None + + def swap_in(self): + self.saved = sys.stdin, sys.stderr, sys.stdout + sys.stdin = sys.stdout = sys.stderr = self.desc + + def swap_out(self): + sys.stdin, sys.stderr, sys.stdout = self.saved + +def backdoor((conn, addr), locals=None): + host, port = addr + print "backdoor to %s:%s" % (host, port) + ctx = SocketConsole(conn) + ctx.register() + try: + console = InteractiveConsole(locals) + console.interact() + finally: + ctx.unregister() diff --git a/eventlet/channel.py b/eventlet/channel.py new file mode 100644 index 0000000..a24799a --- /dev/null +++ b/eventlet/channel.py @@ -0,0 +1,98 @@ +"""\ +@file channel.py +@author Bob Ippolito + +Copyright (c) 2005-2006, Bob Ippolito +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import collections + +from eventlet import api, greenlib + +import greenlet + +__all__ = ['channel'] + +class channel(object): + """A channel is a control flow primitive for co-routines. It is a + "thread-like" queue for controlling flow between two (or more) co-routines. 
+ The state model is: + + * If one co-routine calls send(), it is unscheduled until another + co-routine calls receive(). + * If one co-rounte calls receive(), it is unscheduled until another + co-routine calls send(). + * Once a paired send()/receive() have been called, both co-routeines + are rescheduled. + + This is similar to: http://stackless.com/wiki/Channels + """ + balance = 0 + + def _tasklet_loop(self): + deque = self.deque = collections.deque() + hub = api.get_hub() + switch = greenlib.switch + direction, caller, args = switch() + try: + while True: + if direction == -1: + # waiting to receive + if self.balance > 0: + sender, args = deque.popleft() + hub.schedule_call(0, switch, sender) + hub.schedule_call(0, switch, caller, *args) + else: + deque.append(caller) + else: + # waiting to send + if self.balance < 0: + receiver = deque.popleft() + hub.schedule_call(0, switch, receiver, *args) + hub.schedule_call(0, switch, caller) + else: + deque.append((caller, args)) + self.balance += direction + direction, caller, args = hub.switch() + finally: + deque.clear() + del self.deque + self.balance = 0 + + def _send_tasklet(self, *args): + try: + t = self._tasklet + except AttributeError: + t = self._tasklet = greenlib.tracked_greenlet() + greenlib.switch(t, (self._tasklet_loop,)) + if args: + return greenlib.switch(t, (1, greenlet.getcurrent(), args)) + else: + return greenlib.switch(t, (-1, greenlet.getcurrent(), args)) + + def receive(self): + return self._send_tasklet() + + def send(self, value): + return self._send_tasklet(value) + + def send_exception(self, exc): + return self._send_tasklet(None, exc) diff --git a/eventlet/coros.py b/eventlet/coros.py new file mode 100644 index 0000000..d430969 --- /dev/null +++ b/eventlet/coros.py @@ -0,0 +1,167 @@ +"""\ +@file coros.py +@author Donovan Preston + +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import time +import traceback +import greenlet + + +from eventlet import api +from eventlet import channel +from eventlet import pools +from eventlet import greenlib + +class Cancelled(RuntimeError): + pass + + +NOT_USED = object() + + +class event(object): + """An abstraction where an arbitrary number of coroutines + can wait for one event from another. + """ + _result = None + def __init__(self): + self.reset() + + def reset(self): + """ Reset this event so it can be used to send again. 
+ Can only be called after send has been called.""" + assert self._result is not NOT_USED + self.epoch = time.time() + self._result = NOT_USED + self._waiters = {} + + def wait(self): + """wait until another coroutine calls send. + Returns the value the other coroutine passed to + send. Returns immediately if the event has already + occured. + """ + if self._result is NOT_USED: + self._waiters[greenlet.getcurrent()] = True + return api.get_hub().switch() + if self._exc is not None: + raise self._exc + return self._result + + def cancel(self, waiter): + """Raise an exception into a coroutine which called + wait() an this event instead of returning a value + from wait. Sends the eventlet.coros.Cancelled + exception + + waiter: The greenlet (greenlet.getcurrent()) of the + coroutine to cancel + """ + if waiter in self._waiters: + del self._waiters[waiter] + api.get_hub().schedule_call( + 0, greenlib.switch, waiter, None, Cancelled()) + + def send(self, result=None, exc=None): + """Resume all previous and further + calls to wait() with result. + """ + assert self._result is NOT_USED + self._result = result + self._exc = exc + hub = api.get_hub() + for waiter in self._waiters: + hub.schedule_call(0, greenlib.switch, waiter, self._result) + +class CoroutinePool(pools.Pool): + """ Like a thread pool, but with coroutines. """ + def _main_loop(self, sender): + while True: + recvd = sender.wait() + sender.reset() + (evt, func, args, kw) = recvd + try: + result = func(*args, **kw) + if evt is not None: + evt.send(result) + except greenlet.GreenletExit: + pass + except Exception, e: + traceback.print_exc() + if evt is not None: + evt.send(exc=e) + self.put(sender) + + def create(self): + """Private implementation of eventlet.pools.Pool + interface. Creates an event and spawns the + _main_loop coroutine, passing the event. + The event is used to send a callable into the + new coroutine, to be executed. + """ + sender = event() + api.spawn(self._main_loop, sender) + return sender + + def execute(self, func, *args, **kw): + """Execute func in one of the coroutines maintained + by the pool, when one is free. + + Immediately returns an eventlet.coros.event object which + func's result will be sent to when it is available. + """ + sender = self.get() + receiver = event() + sender.send((receiver, func, args, kw)) + return receiver + + def execute_async(self, func, *args, **kw): + """Execute func in one of the coroutines maintained + by the pool, when one is free. + + This version does not provide the return value. + """ + sender = self.get() + sender.send((None, func, args, kw)) + + +class pipe(object): + """ Implementation of pipe using events. Not tested! Not used, either.""" + def __init__(self): + self._event = event() + self._buffer = '' + + def send(self, txt): + self._buffer += txt + evt, self._event = self._event, event() + evt.send() + + def recv(self, num=16384): + if not self._buffer: + self._event.wait() + if num >= len(self._buffer): + buf, self._buffer = self._buffer, '' + else: + buf, self._buffer = self._buffer[:num], self._buffer[num:] + return buf + diff --git a/eventlet/coros_test.py b/eventlet/coros_test.py new file mode 100644 index 0000000..0ea65ef --- /dev/null +++ b/eventlet/coros_test.py @@ -0,0 +1,144 @@ +"""\ +@file coros_test.py +@author Donovan Preston, Ryan Williams + +Copyright (c) 2000-2007, Linden Research, Inc. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" +from eventlet import tests +from eventlet import coros, api + +class TestEvent(tests.TestCase): + mode = 'static' + def setUp(self): + # raise an exception if we're waiting forever + self._cancel_timeout = api.exc_after(1, RuntimeError()) + + def tearDown(self): + self._cancel_timeout.cancel() + + def test_waiting_for_event(self): + evt = coros.event() + value = 'some stuff' + def send_to_event(): + evt.send(value) + api.spawn(send_to_event) + self.assertEqual(evt.wait(), value) + + def test_multiple_waiters(self): + evt = coros.event() + value = 'some stuff' + results = [] + def wait_on_event(i_am_done): + evt.wait() + results.append(True) + i_am_done.send() + + waiters = [] + count = 5 + for i in range(count): + waiters.append(coros.event()) + api.spawn(wait_on_event, waiters[-1]) + evt.send() + + for w in waiters: + w.wait() + + self.assertEqual(len(results), count) + + def test_cancel(self): + evt = coros.event() + # close over the current coro so we can cancel it explicitly + current = api.getcurrent() + def cancel_event(): + evt.cancel(current) + api.spawn(cancel_event) + + self.assertRaises(coros.Cancelled, evt.wait) + + def test_reset(self): + evt = coros.event() + + # calling reset before send should throw + self.assertRaises(AssertionError, evt.reset) + + value = 'some stuff' + def send_to_event(): + evt.send(value) + api.spawn(send_to_event) + self.assertEqual(evt.wait(), value) + + # now try it again, and we should get the same exact value, + # and we shouldn't be allowed to resend without resetting + value2 = 'second stuff' + self.assertRaises(AssertionError, evt.send, value2) + self.assertEqual(evt.wait(), value) + + # reset and everything should be happy + evt.reset() + def send_to_event2(): + evt.send(value2) + api.spawn(send_to_event2) + self.assertEqual(evt.wait(), value2) + +class TestCoroutinePool(tests.TestCase): + mode = 'static' + def setUp(self): + # raise an exception if we're waiting forever + self._cancel_timeout = api.exc_after(1, RuntimeError()) + + def tearDown(self): + self._cancel_timeout.cancel() + + def test_execute_async(self): + done = coros.event() + def some_work(): + done.send() + pool = coros.CoroutinePool(0, 2) + pool.execute_async(some_work) + done.wait() + + def test_execute(self): + value = 'return value' + def some_work(): + return value + pool = coros.CoroutinePool(0, 2) + worker = pool.execute(some_work) + self.assertEqual(value, worker.wait()) + + def test_multiple_coros(self): + evt = coros.event() + results = [] + 
def producer(): + results.append('prod') + evt.send() + + def consumer(): + results.append('cons1') + evt.wait() + results.append('cons2') + + pool = coros.CoroutinePool(0, 2) + done = pool.execute(consumer) + pool.execute_async(producer) + done.wait() + self.assertEquals(['cons1', 'prod', 'cons2'], results) + +if __name__ == '__main__': + tests.main() diff --git a/eventlet/greenlib.py b/eventlet/greenlib.py new file mode 100644 index 0000000..0fdead5 --- /dev/null +++ b/eventlet/greenlib.py @@ -0,0 +1,323 @@ +"""\ +@file greenlib.py +@author Bob Ippolito + +Copyright (c) 2005-2006, Bob Ippolito +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" +import sys +import itertools + +import greenlet + +from eventlet import tls + +__all__ = [ + 'switch', 'kill', 'tracked_greenlets', + 'greenlet_id', 'greenlet_dict', 'GreenletContext', + 'tracked_greenlet', +] + +try: + reversed +except NameError: + def reversed(something): + for x in something[::-1]: + yield x + +_threadlocal = tls.local() + +def tracked_greenlet(): + """ + Returns a greenlet that has a greenlet-local dictionary and can be + used with GreenletContext and enumerated with tracked_greenlets + """ + return greenlet.greenlet(greenlet_body) + +class GreenletContextManager(object): + """ + Per-thread manager for GreenletContext. 
Created lazily on registration + """ + def __new__(cls, *args, **kw): + dct = greenlet_dict() + self = dct.get('greenlet_context', None) + if self is not None: + return self + self = super(GreenletContextManager, cls).__new__(cls, *args, **kw) + dct['greenlet_context'] = self + self.contexts = [] + return self + + def add_context(self, ctx): + fn = getattr(ctx, '_swap_in', None) + if fn is not None: + fn() + self.contexts.append(ctx) + + def remove_context(self, ctx): + try: + idx = self.contexts.index(ctx) + except ValueError: + return + else: + del self.contexts[idx] + fn = getattr(ctx, '_swap_out', None) + if fn is not None: + fn() + fn = getattr(ctx, '_finalize', None) + if fn is not None: + fn() + + def swap_in(self): + for ctx in self.contexts: + fn = getattr(ctx, '_swap_in', None) + if fn is not None: + fn() + + def swap_out(self): + for ctx in reversed(self.contexts): + fn = getattr(ctx, '_swap_out', None) + if fn is not None: + fn() + + def finalize(self): + for ctx in reversed(self.contexts): + fn = getattr(ctx, '_swap_out', None) + if fn is not None: + fn() + fn = getattr(ctx, '_finalize', None) + if fn is not None: + fn() + del self.contexts[:] + try: + del greenlet_dict()['greenlet_context'] + except KeyError: + pass + +class GreenletContext(object): + """ + A context manager to be triggered when a specific tracked greenlet is + swapped in, swapped out, or finalized. + + To use, subclass and override the swap_in, swap_out, and/or finalize + methods, for example:: + + import greenlib + from greenlib import greenlet_id, tracked_greenlet, switch + + class NotifyContext(greenlib.GreenletContext): + + def swap_in(self): + print "swap_in" + + def swap_out(self): + print "swap_out" + + def finalize(self): + print "finalize" + + def another_greenlet(): + print "another_greenlet" + + def notify_demo(): + print "starting" + NotifyContext().register() + switch(tracked_greenlet(), (another_greenlet,)) + print "finishing" + # we could have kept the NotifyContext object + # to unregister it here but finalization of all + # contexts is implicit when the greenlet returns + + t = tracked_greenlet() + switch(t, (notify_demo,)) + + The output should be: + + starting + swap_in + swap_out + another_greenlet + swap_in + finishing + swap_out + finalize + + """ + _balance = 0 + + def _swap_in(self): + if self._balance != 0: + raise RuntimeError("balance != 0: %r" % (self._balance,)) + self._balance = self._balance + 1 + fn = getattr(self, 'swap_in', None) + if fn is not None: + fn() + + def _swap_out(self): + if self._balance != 1: + raise RuntimeError("balance != 1: %r" % (self._balance,)) + self._balance = self._balance - 1 + fn = getattr(self, 'swap_out', None) + if fn is not None: + fn() + + def register(self): + GreenletContextManager().add_context(self) + + def unregister(self): + GreenletContextManager().remove_context(self) + + def _finalize(self): + fn = getattr(self, 'finalize', None) + if fn is not None: + fn() + + +def kill(g): + """ + Kill the given greenlet if it is alive by sending it a GreenletExit. + + Note that of any other exception is raised, it will pass-through! + """ + if not g: + return + kill_exc = greenlet.GreenletExit() + try: + try: + g.parent = greenlet.getcurrent() + except ValueError: + pass + try: + switch(g, exc=kill_exc) + except SwitchingToDeadGreenlet: + pass + except greenlet.GreenletExit, e: + if e is not kill_exc: + raise + +def tracked_greenlets(): + """ + Return a list of greenlets tracked in this thread. 
Tracked greenlets + use greenlet_body() to ensure that they have greenlet-local storage. + """ + try: + return _threadlocal.greenlets.keys() + except AttributeError: + return [] + +def greenlet_id(): + """ + Get the id of the current tracked greenlet, returns None if the + greenlet is not tracked. + """ + try: + d = greenlet_dict() + except RuntimeError: + return None + return d['greenlet_id'] + +def greenlet_dict(): + """ + Return the greenlet local storage for this greenlet. Raises RuntimeError + if this greenlet is not tracked. + """ + self = greenlet.getcurrent() + try: + return _threadlocal.greenlets[self] + except (AttributeError, KeyError): + raise RuntimeError("greenlet %r is not tracked" % (self,)) + +def _greenlet_context(dct=None): + if dct is None: + try: + dct = greenlet_dict() + except RuntimeError: + return None + return dct.get('greenlet_context', None) + +def _greenlet_context_call(name, dct=None): + ctx = _greenlet_context(dct) + fn = getattr(ctx, name, None) + if fn is not None: + fn() + +def greenlet_body(value, exc): + """ + Track the current greenlet during the execution of the given callback, + normally you would use tracked_greenlet() to get a greenlet that uses this. + + Greenlets using this body must be greenlib.switch()'ed to + """ + if exc is not None: + if isinstance(exc, tuple): + raise exc[0], exc[1], exc[2] + raise exc + cb, args = value[0], value[1:] + try: + greenlets = _threadlocal.greenlets + except AttributeError: + greenlets = _threadlocal.greenlets = {} + else: + if greenlet.getcurrent() in greenlets: + raise RuntimeError("greenlet_body can not be called recursively!") + try: + greenlet_id = _threadlocal.next_greenlet_id.next() + except AttributeError: + greenlet_id = 1 + _threadlocal.next_greenlet_id = itertools.count(2) + greenlets[greenlet.getcurrent()] = {'greenlet_id': greenlet_id} + try: + return cb(*args) + finally: + _greenlet_context_call('finalize') + greenlets.pop(greenlet.getcurrent(), None) + + +class SwitchingToDeadGreenlet(RuntimeError): + pass + + +def switch(other=None, value=None, exc=None): + """ + Switch to another greenlet, passing value or exception + """ + self = greenlet.getcurrent() + if other is None: + other = self.parent + if other is None: + other = self + if not (other or hasattr(other, 'run')): + raise SwitchingToDeadGreenlet("Switching to dead greenlet %r %r %r" % (other, value, exc)) + _greenlet_context_call('swap_out') + try: + rval = other.switch(value, exc) + if not rval or not other: + res, exc = rval, None + else: + res, exc = rval + except: + res, exc = None, sys.exc_info() + _greenlet_context_call('swap_in') + if isinstance(exc, tuple): + typ, exc, tb = exc + raise typ, exc, tb + elif exc is not None: + raise exc + return res diff --git a/eventlet/httpc.py b/eventlet/httpc.py new file mode 100755 index 0000000..af6f40f --- /dev/null +++ b/eventlet/httpc.py @@ -0,0 +1,334 @@ +"""\ +@file httpc.py +@author Donovan Preston + +Copyright (c) 2005-2006, Donovan Preston +Copyright (c) 2007, Linden Research, Inc. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import datetime +import httplib +import os.path +import os +import time +import urlparse + + +from mx.DateTime import Parser + + +_old_HTTPConnection = httplib.HTTPConnection +_old_HTTPSConnection = httplib.HTTPSConnection + + +HTTP_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT' + + +to_http_time = lambda t: time.strftime(HTTP_TIME_FORMAT, time.gmtime(t)) +from_http_time = lambda t: int(Parser.DateTimeFromString(t).gmticks()) + +def host_and_port_from_url(url): + """@brief Simple function to get host and port from an http url. + @return Returns host, port and port may be None. + """ + host = None + port = None + #print url + parsed_url = urlparse.urlparse(url) + try: + host, port = parsed_url[1].split(':') + except ValueError: + host = parsed_url[1].split(':') + return host, port + + +def better_putrequest(self, method, url, skip_host=0): + self.method = method + self.path = url + self.old_putrequest(method, url, skip_host) + + +class HttpClient(httplib.HTTPConnection): + """A subclass of httplib.HTTPConnection which works around a bug + in the interaction between eventlet sockets and httplib. httplib relies + on gc to close the socket, causing the socket to be closed too early. + + This is an awful hack and the bug should be fixed properly ASAP. 
+ """ + def __init__(self, host, port=None, strict=None): + _old_HTTPConnection.__init__(self, host, port, strict) + + def close(self): + pass + + old_putrequest = httplib.HTTPConnection.putrequest + putrequest = better_putrequest + + +def wrap_httplib_with_httpc(): + httplib.HTTP._connection_class = httplib.HTTPConnection = HttpClient + httplib.HTTPS._connection_class = httplib.HTTPSConnection = HttpsClient + + +class HttpsClient(httplib.HTTPSConnection): + def close(self): + pass + old_putrequest = httplib.HTTPSConnection.putrequest + putrequest = better_putrequest + + +class FileScheme(object): + """Retarded scheme to local file wrapper.""" + host = '' + port = '' + reason = '' + + def __init__(self, location): + pass + + def request(self, method, fullpath, body='', headers=None): + self.status = 200 + self.path = fullpath.split('?')[0] + self.method = method = method.lower() + assert method in ('get', 'put', 'delete') + if method == 'delete': + try: + os.remove(self.path) + except OSError: + pass # don't complain if already deleted + elif method == 'put': + try: + f = file(self.path, 'w') + f.write(body) + f.close() + except IOError, e: + self.status = 500 + self.raise_connection_error() + elif method == 'get': + if not os.path.exists(self.path): + self.status = 404 + self.raise_connection_error(NotFound) + + def connect(self): + pass + + def getresponse(self): + return self + + def getheader(self, header): + if header == 'content-length': + try: + return os.path.getsize(self.path) + except OSError: + return 0 + + def read(self, howmuch=None): + if self.method == 'get': + try: + fl = file(self.path, 'r') + if howmuch is None: + return fl.read() + else: + return fl.read(howmuch) + except IOError: + self.status = 500 + self.raise_connection_error() + return '' + + def raise_connection_error(self, klass=None): + if klass is None: + klass=ConnectionError + raise klass( + self.method, self.host, self.port, + self.path, self.status, self.reason, '') + + +class ConnectionError(Exception): + def __init__(self, method, host, port, path, status, reason, body): + self.method = method + self.host = host + self.port = port + self.path = path + self.status = status + self.reason = reason + self.body = body + Exception.__init__(self) + + def __repr__(self): + return "ConnectionError(%r, %r, %r, %r, %r, %r, %r)" % ( + self.method, self.host, self.port, + self.path, self.status, self.reason, self.body) + + __str__ = __repr__ + + +class UnparseableResponse(ConnectionError): + def __init__(self, content_type, response): + self.content_type = content_type + self.response = response + Exception.__init__(self) + + def __repr__(self): + return "UnparseableResponse(%r, %r)" % ( + self.content_type, self.response) + + __str__ = __repr__ + + +class Accepted(ConnectionError): + pass + + +class NotFound(ConnectionError): + pass + + +class Forbidden(ConnectionError): + pass + + +class InternalServerError(ConnectionError): + pass + + +class Gone(ConnectionError): + pass + + +status_to_error_map = { + 500: InternalServerError, + 410: Gone, + 404: NotFound, + 403: Forbidden, + 202: Accepted, +} + +scheme_to_factory_map = { + 'http': HttpClient, + 'https': HttpsClient, + 'file': FileScheme, +} + + +def make_connection(scheme, location, use_proxy): + if use_proxy: + if "http_proxy" in os.environ: + location = os.environ["http_proxy"] + elif "ALL_PROXY" in os.environ: + location = os.environ["ALL_PROXY"] + else: + location = "localhost:3128" #default to local squid + + # run a little heuristic to see if it's an url, and 
if so parse out the hostpart + if location.startswith('http'): + _scheme, location, path, parameters, query, fragment = urlparse.urlparse(location) + + result = scheme_to_factory_map[scheme](location) + result.connect() + return result + + +def connect(url, use_proxy=False): + scheme, location, path, params, query, id = urlparse.urlparse(url) + return make_connection(scheme, location, use_proxy) + + +def request(connection, method, url, body='', headers=None, dumper=None, loader=None, use_proxy=False, verbose=False, ok=None): + if ok is None: + ok = (200, 201, 204) + if headers is None: + headers = {} + if not use_proxy: + scheme, location, path, params, query, id = urlparse.urlparse(url) + url = path + if query: + url += "?" + query + else: + scheme, location, path, params, query, id = urlparse.urlparse(url) + headers.update({ "host" : location }) + if scheme == 'file': + use_proxy = False + + + if dumper is not None: + body = dumper(body) + headers['content-length'] = len(body) + + connection.request(method, url, body, headers) + response = connection.getresponse() + if (response.status not in ok): + klass = status_to_error_map.get(response.status, ConnectionError) + raise klass( + connection.method, connection.host, connection.port, + connection.path, response.status, response.reason, response.read()) + + body = response.read() + + if loader is None: + return body + + try: + body = loader(body) + except Exception, e: + raise UnparseableResponse(loader, body) + + if verbose: + return response.status, response.msg, body + return body + + +def make_suite(dumper, loader, fallback_content_type): + def get(url, headers=None, use_proxy=False, verbose=False, ok=None): + #import pdb; pdb.Pdb().set_trace() + if headers is None: + headers = {} + connection = connect(url) + return request(connection, 'GET', url, '', headers, None, loader, use_proxy, verbose, ok) + + def put(url, data, headers=None, content_type=None, verbose=False, ok=None): + if headers is None: + headers = {} + if content_type is not None: + headers['content-type'] = content_type + else: + headers['content-type'] = fallback_content_type + connection = connect(url) + return request(connection, 'PUT', url, data, headers, dumper, loader, verbose=verbose, ok=ok) + + def delete(url, verbose=False, ok=None): + return request(connect(url), 'DELETE', url, verbose=verbose, ok=ok) + + def post(url, data='', headers=None, content_type=None, verbose=False, ok=None): + connection = connect(url) + if headers is None: + headers = {} + if 'content-type' not in headers: + if content_type is not None: + headers['content-type'] = content_type + else: + headers['content-type'] = fallback_content_type + return request(connect(url), 'POST', url, data, headers, dumper, loader, verbose=verbose, ok=ok) + + return get, put, delete, post + + +get, put, delete, post = make_suite(str, None, 'text/plain') + + diff --git a/eventlet/httpd.py b/eventlet/httpd.py new file mode 100644 index 0000000..e6904db --- /dev/null +++ b/eventlet/httpd.py @@ -0,0 +1,491 @@ +"""\ +@file httpd.py +@author Donovan Preston + +Copyright (c) 2005-2006, Donovan Preston +Copyright (c) 2007, Linden Research, Inc. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import cgi +import cStringIO +import errno +import socket +import sys +import time +import urllib +import socket +import traceback +import cStringIO +import BaseHTTPServer + +from eventlet import api +from eventlet import coros + + +USE_ACCESS_LOG = True + + +CONNECTION_CLOSED = (errno.EPIPE, errno.ECONNRESET) + + +class Request(object): + _method = None + _path = None + _responsecode = 200 + _reason_phrase = None + _request_started = False + _chunked = False + _producer_adapters = {} + depth = 0 + def __init__(self, protocol, method, path, headers): + self.request_start_time = time.time() + self.site = protocol.server.site + self.protocol = protocol + self._method = method + if '?' in path: + self._path, self._query = path.split('?', 1) + self._query = self._query.replace('&', '&') + else: + self._path, self._query = path, None + self._incoming_headers = headers + self._outgoing_headers = dict() + + def response(self, code, reason_phrase=None, headers=None, body=None): + """Change the response code. This will not be sent until some + data is written; last call to this method wins. Default is + 200 if this is not called. + """ + self._responsecode = code + self._reason_phrase = reason_phrase + self.protocol.set_response_code(self, code, reason_phrase) + if headers is not None: + for key, value in headers: + self.set_header(key, value) + if body is not None: + self.write(body) + + def is_okay(self): + return 200 <= self._responsecode <= 299 + + def full_url(self): + path = self.path() + query = self.query() + if query: + path = path + '?' 
+ query + + via = self.get_header('via', '') + if via.strip(): + next_part = iter(via.split()).next + + received_protocol = next_part() + received_by = next_part() + if received_by.endswith(','): + received_by = received_by[:-1] + else: + comment = '' + while not comment.endswith(','): + try: + comment += next_part() + except StopIteration: + comment += ',' + break + comment = comment[:-1] + else: + received_by = self.get_header('host') + + return '%s://%s%s' % (self.request_protocol(), received_by, path) + + def begin_response(self, length="-"): + """Begin the response, and return the initial response text + """ + self._request_started = True + request_time = time.time() - self.request_start_time + + code = self._responsecode + proto = self.protocol + + if USE_ACCESS_LOG: + proto.server.write_access_log_line( + proto.client_address[0], + time.strftime("%d/%b/%Y %H:%M:%S"), + proto.requestline, + code, + length, + request_time) + + if self._reason_phrase is not None: + message = self._reason_phrase.split("\n")[0] + elif code in proto.responses: + message = proto.responses[code][0] + else: + message = '' + if proto.request_version == 'HTTP/0.9': + return [] + + response_lines = proto.generate_status_line() + + if not self._outgoing_headers.has_key('connection'): + con = self.get_header('connection') + if con is None and proto.request_version == 'HTTP/1.0': + con = 'close' + if con is not None: + self.set_header('connection', con) + + for key, value in self._outgoing_headers.items(): + key = '-'.join([x.capitalize() for x in key.split('-')]) + response_lines.append("%s: %s" % (key, value)) + + response_lines.append("") + return response_lines + + def write(self, obj): + """Writes an arbitrary object to the response, using + the sitemap's adapt method to convert it to bytes. + """ + if isinstance(obj, str): + self._write_bytes(obj) + elif isinstance(obj, unicode): + # use utf8 encoding for now, *TODO support charset negotiation + # Content-Type: text/html; charset=utf-8 + ctype = self._outgoing_headers.get('content-type', 'text/html') + ctype = ctype + '; charset=utf-8' + self._outgoing_headers['content-type'] = ctype + self._write_bytes(obj.encode('utf8')) + else: + self.site.adapt(obj, self) + + def _write_bytes(self, data): + """Write all the data of the response. + Can be called just once. 
+ """ + if self._request_started: + print "Request has already written a response:" + traceback.print_stack() + return + + self._outgoing_headers['content-length'] = len(data) + + response_lines = self.begin_response(len(data)) + response_lines.append(data) + self.protocol.wfile.write("\r\n".join(response_lines)) + if hasattr(self.protocol.wfile, 'flush'): + self.protocol.wfile.flush() + + def method(self): + return self._method + + def path(self): + return self._path + + def path_segments(self): + return [urllib.unquote_plus(x) for x in self._path.split('/')[1:]] + + def query(self): + return self._query + + def uri(self): + if self._query: + return '%s?%s' % ( + self._path, self._query) + return self._path + + def get_headers(self): + return self._incoming_headers + + def get_header(self, header_name, default=None): + return self.get_headers().get(header_name.lower(), default) + + def get_query_pairs(self): + if not hasattr(self, '_split_query'): + if self._query is None: + self._split_query = () + else: + spl = self._query.split('&') + spl = [x.split('=', 1) for x in spl if x] + self._split_query = [] + for query in spl: + if len(query) == 1: + key = query[0] + value = '' + else: + key, value = query + self._split_query.append((urllib.unquote_plus(key), urllib.unquote_plus(value))) + + return self._split_query + + def get_queries_generator(self, name): + """Generate all query parameters matching the given name. + """ + for key, value in self.get_query_pairs(): + if key == name or not name: + yield value + + get_queries = lambda self, name: list(self.get_queries_generator) + + def get_query(self, name, default=None): + try: + return self.get_queries_generator(name).next() + except StopIteration: + return default + + def get_arg_list(self, name): + return self.get_field_storage().getlist(name) + + def get_arg(self, name, default=None): + return self.get_field_storage().getfirst(name, default) + + def get_field_storage(self): + if not hasattr(self, '_field_storage'): + if self.method() == 'GET': + data = '' + if self._query: + data = self._query + fl = cStringIO.StringIO(data) + else: + fl = self.protocol.rfile + ## Allow our resource to provide the FieldStorage instance for + ## customization purposes. 
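+            ## As an illustrative sketch only (not part of this change; the
+            ## class name is made up), such a resource might look like:
+            ##     class UploadResource(object):
+            ##         def getFieldStorage(self, request, fl, headers, environ):
+            ##             return cgi.FieldStorage(fl, headers, environ=environ,
+            ##                                     keep_blank_values=True)
+            ## Otherwise the default cgi.FieldStorage construction below is used.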
+ headers = self.get_headers() + environ = dict( + REQUEST_METHOD='POST', + QUERY_STRING=self._query or '') + if (hasattr(self, 'resource') and + hasattr(self.resource, 'getFieldStorage')): + self._field_storage = self.resource.getFieldStorage( + self, fl, headers, environ) + else: + self._field_storage = cgi.FieldStorage( + fl, headers, environ=environ) + + return self._field_storage + + def set_header(self, key, value): + if key.lower() == 'connection' and value.lower() == 'close': + self.protocol.close_connection = 1 + self._outgoing_headers[key.lower()] = value + __setitem__ = set_header + + def get_outgoing_header(self, key): + return self._outgoing_headers[key.lower()] + + def has_outgoing_header(self, key): + return self._outgoing_headers.has_key(key.lower()) + + def socket(self): + return self.protocol.socket + + def error(self, response=None, body=None, log_traceback=True): + if log_traceback: + traceback.print_exc() + if response is None: + response = 500 + if body is None: + typ, val, tb = sys.exc_info() + body = dict(type=str(typ), error=True, reason=str(val)) + self.response(response) + if type(body) is str: + self.write(body) + return + try: + produce(body, self) + except Exception, e: + traceback.print_exc() + if not self.response_written(): + self.write('Internal Server Error') + + def not_found(self): + self.error(404, 'Not Found\n', log_traceback=False) + + def read_body(self): + if not hasattr(self, '_cached_parsed_body'): + if not hasattr(self, '_cached_body'): + length = self.get_header('content-length') + if length: + length = int(length) + if length: + self._cached_body = self.protocol.rfile.read(length) + else: + self._cached_body = '' + body = self._cached_body + if hasattr(self.site, 'parsers'): + parser = self.site.parsers.get( + self.get_header('content-type')) + if parser is not None: + body = parser(body) + self._cached_parsed_body = body + return self._cached_parsed_body + + def response_written(self): + ## TODO change badly named variable + return self._request_started + + def request_version(self): + return self.protocol.request_version + + def request_protocol(self): + if self.protocol.socket.is_secure: + return "https" + return "http" + + def server_address(self): + return self.protocol.server.address + + def __repr__(self): + return "" % ( + getattr(self, '_method'), getattr(self, '_path')) + + +DEFAULT_TIMEOUT = 300 + + +class Timeout(RuntimeError): + pass + + +class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): + protocol_version = "HTTP/1.1" + def __init__(self, request, client_address, server): + self.socket = self.request = self.rfile = self.wfile = request + self.client_address = client_address + self.server = server + self._code = 200 + self._message = 'OK' + + def set_response_code(self, request, code, message): + self._code = code + if message is not None: + self._message = message.split("\n")[0] + elif code in self.responses: + self._message = self.responses[code][0] + else: + self._message = '' + + def generate_status_line(self): + return [ + "%s %d %s" % ( + self.protocol_version, self._code, self._message)] + + def handle(self): + self.close_connection = 0 + + timeout = DEFAULT_TIMEOUT + while not self.close_connection: + if timeout == 0: + break + cancel = api.exc_after(timeout, Timeout) + try: + self.raw_requestline = self.rfile.readline() + except socket.error, e: + if e[0] in CONNECTION_CLOSED: + self.close_connection = True + cancel.cancel() + continue + except Timeout: + self.close_connection = True + continue + cancel.cancel() 
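+            # Descriptive note: api.exc_after(timeout, Timeout) arms a timer that
+            # raises Timeout in this coroutine if no request line arrives within
+            # `timeout` seconds, and cancel.cancel() disarms it once readline
+            # returns, so idle keep-alive connections are reaped without blocking
+            # other coroutines.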
+ + if not self.raw_requestline or not self.parse_request(): + self.close_connection = True + continue + + request = Request(self, self.command, self.path, self.headers) + request.set_header('Server', self.version_string()) + request.set_header('Date', self.date_time_string()) + try: + timeout = int(request.get_header('keep-alive')) + except (TypeError, ValueError), e: + pass + + try: + self.server.site.handle_request(request) + # throw an exception if it failed to write a body + if not request.response_written(): + raise NotImplementedError("Handler failed to write response to request: %s" % request) + + if not hasattr(self, '_cached_body'): + request.read_body() ## read & discard body + continue + except socket.error, e: + # Broken pipe, connection reset by peer + if e[0] in CONNECTION_CLOSED: + #print "Remote host closed connection before response could be sent" + pass + else: + raise + except Exception, e: + if not request.response_written(): + request.response(500) + request.write('Internal Server Error') + self.socket.close() + raise + self.socket.close() + + +class Server(BaseHTTPServer.HTTPServer): + def __init__(self, socket, address, site, log): + self.socket = socket + self.address = address + self.site = site + if log: + self.log = log + if hasattr(log, 'info'): + log.write = log.info + else: + self.log = self + + def write(self, something): + sys.stdout.write('%s\n' % (something, )) + + def log_message(self, message): + self.log.write(message) + + def log_exception(self, type, value, tb): + print ''.join(traceback.format_exception(type, value, tb)) + + def write_access_log_line(self, *args): + """Write a line to the access.log. Arguments: + client_address, date_time, requestline, code, size, request_time + """ + self.log.write( + '%s - - [%s] "%s" %s %s %.6f' % args) + + +def server(sock, site, log=None, max_size=512): + pool = coros.CoroutinePool(max_size=max_size) + serv = Server(sock, sock.getsockname(), site, log) + try: + print "httpd starting up on", sock.getsockname() + while True: + try: + new_sock, address = sock.accept() + proto = HttpProtocol(new_sock, address, serv) + pool.execute_async(proto.handle) + except KeyboardInterrupt: + api.get_hub().remove_descriptor(sock.fileno()) + print "httpd exiting" + break + finally: + try: + sock.close() + except socket.error: + pass + + diff --git a/eventlet/httpd_test.py b/eventlet/httpd_test.py new file mode 100644 index 0000000..19b2c9d --- /dev/null +++ b/eventlet/httpd_test.py @@ -0,0 +1,139 @@ +"""\ +@file httpd_test.py +@author Donovan Preston + +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + + +from eventlet import api +from eventlet import httpd +from eventlet import processes +from eventlet import util + + +util.wrap_socket_with_coroutine_socket() + + +from eventlet import tests + + +class Site(object): + def handle_request(self, req): + req.write('hello world') + + def adapt(self, obj, req): + req.write(str(obj)) + + +CONTENT_LENGTH = 'content-length' + + +""" +HTTP/1.1 200 OK +Date: foo +Content-length: 11 + +hello world +""" + +class ConnectionClosed(Exception): + pass + + +def read_http(sock): + response_line = sock.readline() + if not response_line: + raise ConnectionClosed + raw_headers = sock.readuntil('\r\n\r\n').strip() + #print "R", response_line, raw_headers + headers = dict() + for x in raw_headers.split('\r\n'): + #print "X", x + key, value = x.split(': ', 1) + headers[key.lower()] = value + + if CONTENT_LENGTH in headers: + num = int(headers[CONTENT_LENGTH]) + body = sock.read(num) + #print body + + +class TestHttpd(tests.TestCase): + mode = 'static' + def setUp(self): + self.killer = api.spawn( + httpd.server, api.tcp_listener(('0.0.0.0', 12346)), Site(), max_size=128) + + def tearDown(self): + api.kill(self.killer) + + def test_001_server(self): + sock = api.connect_tcp( + ('127.0.0.1', 12346)) + + sock.write('GET / HTTP/1.0\r\nHost: localhost\r\n\r\n') + result = sock.read() + sock.close() + ## The server responds with the maximum version it supports + self.assert_(result.startswith('HTTP'), result) + self.assert_(result.endswith('hello world')) + + def test_002_keepalive(self): + sock = api.connect_tcp( + ('127.0.0.1', 12346)) + + sock.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') + read_http(sock) + sock.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') + read_http(sock) + sock.close() + + def test_003_passing_non_int_to_read(self): + # This should go in test_wrappedfd + sock = api.connect_tcp( + ('127.0.0.1', 12346)) + + sock.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') + cancel = api.exc_after(1, RuntimeError) + self.assertRaises(TypeError, sock.read, "This shouldn't work") + cancel.cancel() + sock.close() + + def test_004_close_keepalive(self): + sock = api.connect_tcp( + ('127.0.0.1', 12346)) + + sock.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') + read_http(sock) + sock.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') + read_http(sock) + sock.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') + self.assertRaises(ConnectionClosed, read_http, sock) + sock.close() + + def test_005_run_apachebench(self): + url = 'http://localhost:12346/' + out = processes.Process('/usr/sbin/ab', ['-c','64','-n','1024', '-k', url]) + print out.read() + + +if __name__ == '__main__': + tests.main() diff --git a/eventlet/httpdate.py b/eventlet/httpdate.py new file mode 100644 index 0000000..e1f81f3 --- /dev/null +++ b/eventlet/httpdate.py @@ -0,0 +1,39 @@ +"""\ +@file httpdate.py +@author Donovan Preston + +Copyright (c) 2006-2007, Linden Research, Inc. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import time + +__all__ = ['format_date_time'] + +# Weekday and month names for HTTP date/time formatting; always English! +_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] +_monthname = [None, # Dummy so we can use 1-based month numbers + "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] + +def format_date_time(timestamp): + year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp) + return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( + _weekdayname[wd], day, _monthname[month], year, hh, mm, ss + ) diff --git a/eventlet/jsonhttp.py b/eventlet/jsonhttp.py new file mode 100644 index 0000000..31610fb --- /dev/null +++ b/eventlet/jsonhttp.py @@ -0,0 +1,31 @@ +"""\ +@file jsonhttp.py +@author Donovan Preston + +Copyright (c) 2006-2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +from eventlet import httpc + +import simplejson + + +get, put, delete, post = httpc.make_suite( + simplejson.dumps, simplejson.loads, 'application/json') diff --git a/eventlet/kqueuehub.py b/eventlet/kqueuehub.py new file mode 100644 index 0000000..595e7b9 --- /dev/null +++ b/eventlet/kqueuehub.py @@ -0,0 +1,219 @@ +"""\ +@file kqueuehub.py +@author Donovan Preston + +Copyright (c) 2006-2007, Linden Research, Inc. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import sys +import select +import kqueue +import traceback +from errno import EBADF + +from eventlet import greenlib +from eventlet.runloop import RunLoop, Timer + +import greenlet + +class Hub(object): + def __init__(self): + self.runloop = RunLoop(self.wait) + self.descriptor_queue = {} + self.descriptors = {} + self.greenlet = None + self.kfd = None + + def stop(self): + self.process_queue() + self.descriptors, self.descriptor_queue = self.descriptor_queue, {} + os.close(self.kfd) + self.kfd = None + self.runloop.abort() + if self.greenlet is not greenlet.getcurrent(): + self.switch() + + def schedule_call(self, *args, **kw): + return self.runloop.schedule_call(*args, **kw) + + def switch(self): + if not self.greenlet: + self.greenlet = greenlib.tracked_greenlet() + args = ((self.runloop.run,),) + else: + args = () + try: + greenlet.getcurrent().parent = self.greenlet + except ValueError: + pass + return greenlib.switch(self.greenlet, *args) + + def add_descriptor(self, fileno, read=None, write=None, exc=None): + self.descriptor_queue[fileno] = read, write, exc + + def remove_descriptor(self, fileno): + self.descriptor_queue[fileno] = None, None, None + + def exc_descriptor(self, fileno): + # We must handle two cases here, the descriptor + # may be changing or removing its exc handler + # in the queue, or it may be waiting on the queue. + exc = None + try: + exc = self.descriptor_queue[fileno][2] + except KeyError: + try: + exc = self.descriptors[fileno][2] + except KeyError: + pass + if exc is not None: + try: + exc() + except self.runloop.SYSTEM_EXCEPTIONS: + self.squelch_exception(fileno, sys.exc_info()) + + def squelch_exception(self, fileno, exc_info): + traceback.print_exception(*exc_info) + print >>sys.stderr, "Removing descriptor: %r" % (fileno,) + try: + self.remove_descriptor(fileno) + except Exception, e: + print >>sys.stderr, "Exception while removing descriptor! 
%r" % (e,) + + def process_queue(self): + if self.kfd is None: + self.kfd = kqueue.kqueue() + d = self.descriptors + + E_R = kqueue.EVFILT_READ + E_W = kqueue.EVFILT_WRITE + E = kqueue.Event + E_ADD = kqueue.EV_ADD + E_DEL = kqueue.EV_DELETE + + kevent = kqueue.kevent + kfd = self.kfd + for fileno, rwe in self.descriptor_queue.iteritems(): + read, write, exc = rwe + if read is None and write is None and exc is None: + try: + read, write, exc = d.pop(fileno) + except KeyError: + pass + else: + l = [] + if read is not None: + l.append(E(fileno, E_R, E_DEL)) + if write is not None: + l.append(E(fileno, E_W, E_DEL)) + if l: + try: + kevent(kfd, l, 0, 0) + except OSError, e: + if e[0] != EBADF: + raise + else: + l = [] + try: + oldr, oldw, olde = d[fileno] + except KeyError: + pass + else: + if oldr is not None: + if read is None: + l.append(E(fileno, E_R, E_DEL)) + else: + read = None + if oldw is not None: + if write is None: + l.append(E(fileno, E_W, E_DEL)) + else: + write = None + if read is not None: + l.append(E(fileno, E_R, E_ADD)) + if write is not None: + l.append(E(fileno, E_W, E_ADD)) + if l: + try: + kevent(kfd, l, 0, 0) + except OSError, e: + if e[0] != EBADF: + raise + try: + del d[fileno] + except KeyError: + pass + if exc is not None: + try: + exc(fileno) + except SYSTEM_EXCEPTIONS: + raise + except: + self.squelch_exception(fileno, sys.exc_info()) + continue + d[fileno] = rwe + self.descriptor_queue.clear() + + def wait(self, seconds=None): + + self.process_queue() + + if seconds is not None: + seconds *= 1000000000.0 + dct = self.descriptors + events = kqueue.kevent(self.kfd, [], len(dct), seconds) + + SYSTEM_EXCEPTIONS = self.runloop.SYSTEM_EXCEPTIONS + + E_R = kqueue.EVFILT_READ + E_W = kqueue.EVFILT_WRITE + E_EOF = kqueue.EV_EOF + + for e in events: + fileno = e.ident + event = e.filter + + try: + read, write, exc = dct[fileno] + except KeyError: + continue + + if read is not None and event == E_R: + try: + read(fileno) + except SYSTEM_EXCEPTIONS: + raise + except: + self.squelch_exception(fileno, sys.exc_info()) + elif exc is not None and e.fflags & E_EOF: + try: + exc(fileno) + except SYSTEM_EXCEPTIONS: + raise + except: + self.squelch_exception(fileno, sys.exc_info()) + + if write is not None and event == E_W: + try: + write(fileno) + except SYSTEM_EXCEPTIONS: + raise + except: + self.squelch_exception(fileno, sys.exc_info()) diff --git a/eventlet/logutil.py b/eventlet/logutil.py new file mode 100644 index 0000000..0885042 --- /dev/null +++ b/eventlet/logutil.py @@ -0,0 +1,112 @@ +"""\ +@file logutil.py +@author Donovan Preston + +Copyright (c) 2006-2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import syslog +import logging + + +def file_logger(filename): + """Create a logger. This sucks, the logging module sucks, but + it'll do for now. + """ + handler = logging.FileHandler(filename) + formatter = logging.Formatter() + handler.setFormatter(formatter) + log = logging.getLogger(filename) + log.addHandler(handler) + log.setLevel(logging.DEBUG) + return log, handler + + +def stream_logger(stream): + """Create a logger. This sucks.""" + handler = logging.StreamHandler(stream) + formatter = logging.Formatter() + handler.setFormatter(formatter) + log = logging.getLogger() + log.addHandler(handler) + log.setLevel(logging.DEBUG) + return log, handler + + +class LineLogger(object): + towrite = '' + def __init__(self, emit=None): + if emit is not None: + self.emit = emit + + def write(self, stuff): + self.towrite += stuff + if '\n' in self.towrite: + self.flush() + + def flush(self): + try: + newline = self.towrite.index('\n') + except ValueError: + newline = len(self.towrite) + while True: + self.emit(self.towrite[:newline]) + self.towrite = self.towrite[newline+1:] + try: + newline = self.towrite.index('\n') + except ValueError: + break + + def close(self): + pass + + def emit(self, *args): + pass + + +class SysLogger(LineLogger): + """A file-like object which writes to syslog. Can be inserted + as sys.stdin and sys.stderr to have logging output redirected + to syslog. + """ + def __init__(self, priority): + self.priority = priority + + def emit(self, line): + syslog.syslog(self.priority, line) + + +class TeeLogger(LineLogger): + def __init__(self, one, two): + self.one, self.two = one, two + + def emit(self, line): + self.one.emit(line) + self.two.emit(line) + + +class FileLogger(LineLogger): + def __init__(self, file): + self.file = file + + def emit(self, line): + self.file.write(line + '\n') + self.file.flush() + diff --git a/eventlet/pollhub.py b/eventlet/pollhub.py new file mode 100644 index 0000000..0540837 --- /dev/null +++ b/eventlet/pollhub.py @@ -0,0 +1,189 @@ +"""\ +@file pollhub.py +@author Bob Ippolito + +Copyright (c) 2005-2006, Bob Ippolito +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
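+
+A rough usage sketch (Hub, add_descriptor, schedule_call and switch are the
+names defined below; sock, on_readable and tick are placeholders):
+
+    hub = Hub()
+    hub.add_descriptor(sock.fileno(), read=on_readable)  # called as on_readable(fileno)
+    hub.schedule_call(5.0, tick)
+    hub.switch()  # hand control to the greenlet running the runloop
+
+Queued descriptors take effect the next time wait() runs process_queue() and
+registers them with select.poll().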
+""" + +import sys +import select +import socket +import errno +import traceback +from time import sleep + +from eventlet import greenlib +from eventlet.runloop import RunLoop, Timer + +import greenlet + +EXC_MASK = select.POLLERR | select.POLLHUP | select.POLLNVAL +READ_MASK = select.POLLIN +WRITE_MASK = select.POLLOUT + +class Hub(object): + def __init__(self): + self.runloop = RunLoop(self.wait) + self.descriptor_queue = {} + self.descriptors = {} + self.greenlet = None + self.poll = select.poll() + + def stop(self): + self.process_queue() + self.runloop.abort() + if self.greenlet is not greenlet.getcurrent(): + self.switch() + + def schedule_call(self, *args, **kw): + return self.runloop.schedule_call(*args, **kw) + + def switch(self): + if not self.greenlet: + self.greenlet = greenlib.tracked_greenlet() + args = ((self.runloop.run,),) + else: + args = () + try: + greenlet.getcurrent().parent = self.greenlet + except ValueError: + pass + return greenlib.switch(self.greenlet, *args) + + def add_descriptor(self, fileno, read=None, write=None, exc=None): + if fileno in self.descriptor_queue: + oread, owrite, oexc = self.descriptor_queue[fileno] + read, write, exc = read or oread, write or owrite, exc or oexc + self.descriptor_queue[fileno] = read, write, exc + + def remove_descriptor(self, fileno): + self.descriptor_queue[fileno] = None, None, None + + def exc_descriptor(self, fileno): + # We must handle two cases here, the descriptor + # may be changing or removing its exc handler + # in the queue, or it may be waiting on the queue. + exc = None + try: + exc = self.descriptor_queue[fileno][2] + except KeyError: + try: + exc = self.descriptors[fileno][2] + except KeyError: + pass + if exc is not None: + try: + exc(fileno) + except self.runloop.SYSTEM_EXCEPTIONS: + self.squelch_exception(fileno, sys.exc_info()) + + def squelch_exception(self, fileno, exc_info): + traceback.print_exception(*exc_info) + print >>sys.stderr, "Removing descriptor: %r" % (fileno,) + try: + self.remove_descriptor(fileno) + except Exception, e: + print >>sys.stderr, "Exception while removing descriptor! 
%r" % (e,) + + def process_queue(self): + d = self.descriptors + reg = self.poll.register + unreg = self.poll.unregister + rm = READ_MASK + wm = WRITE_MASK + for fileno, rwe in self.descriptor_queue.iteritems(): + read, write, exc = rwe + if read is None and write is None and exc is None: + try: + del d[fileno] + except KeyError: + pass + else: + try: + unreg(fileno) + except socket.error: +# print "squelched socket err on unreg", fileno + pass + else: + mask = 0 + if read is not None: + mask |= rm + if write is not None: + mask |= wm + oldmask = 0 + try: + oldr, oldw, olde = d[fileno] + except KeyError: + pass + else: + if oldr is not None: + oldmask |= rm + if oldw is not None: + oldmask |= wm + if mask != oldmask: + reg(fileno, mask) + d[fileno] = rwe + self.descriptor_queue.clear() + + def wait(self, seconds=None): + self.process_queue() + + if not self.descriptors: + if seconds: + sleep(seconds) + return + try: + presult = self.poll.poll(seconds * 1000.0) + except select.error, e: + if e.args[0] == errno.EINTR: + return + raise + SYSTEM_EXCEPTIONS = self.runloop.SYSTEM_EXCEPTIONS + dct = self.descriptors + + for fileno, event in presult: + try: + read, write, exc = dct[fileno] + except KeyError: + continue + + if read is not None and event & READ_MASK: + try: + read(fileno) + except SYSTEM_EXCEPTIONS: + raise + except: + self.squelch_exception(fileno, sys.exc_info()) + elif exc is not None and event & EXC_MASK: + try: + exc(fileno) + except SYSTEM_EXCEPTIONS: + raise + except: + self.squelch_exception(fileno, sys.exc_info()) + + if write is not None and event & WRITE_MASK: + try: + write(fileno) + except SYSTEM_EXCEPTIONS: + raise + except: + self.squelch_exception(fileno, sys.exc_info()) diff --git a/eventlet/pools.py b/eventlet/pools.py new file mode 100644 index 0000000..7880a79 --- /dev/null +++ b/eventlet/pools.py @@ -0,0 +1,174 @@ +"""\ +@file pools.py +@author Donovan Preston, Aaron Brashears + +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import os +import socket + +from eventlet import api +from eventlet import channel +from eventlet import httpc + +class FanFailed(RuntimeError): + pass + + +class SomeFailed(FanFailed): + pass + + +class AllFailed(FanFailed): + pass + + +class Pool(object): + """ + When using the pool, if you do a get, you should ALWAYS do a put. 
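+    Subclasses override create() to build the pooled item; TokenPool and
+    ConnectionPool below are concrete examples, and a minimal sketch (mirroring
+    the IntPool helper used in pools_test.py) is:
+
+        class IntPool(Pool):
+            def create(self):
+                self.current_integer = getattr(self, 'current_integer', 0) + 1
+                return self.current_integer
+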
+ The pattern is: + + thing = self.pool.get() + try: + # do stuff + finally: + self.pool.put(thing) + """ + def __init__(self, min_size=0, max_size=4): + self.min_size = min_size + self.max_size = max_size + self.current_size = 0 + self.channel = channel.channel() + self.free_items = [] + for x in range(min_size): + self.current_size += 1 + self.free_items.append(self.create()) + + def get(self): + """Return an item from the pool, when one is available + """ + if self.free_items: + return self.free_items.pop(0) + if self.current_size < self.max_size: + self.current_size += 1 + return self.create() + return self.channel.receive() + + def put(self, item): + """Put an item back into the pool, when done + """ + if self.channel.balance < 0: + self.channel.send(item) + else: + self.free_items.append(item) + + def free(self): + """Return the number of free items in the pool. + """ + return len(self.free_items) + self.max_size - self.current_size + + def waiting(self): + """Return the number of routines waiting for a pool item. + """ + if self.channel.balance < 0: + return -self.channel.balance + return 0 + + def create(self): + """Generate a new pool item + """ + raise NotImplementedError("Implement in subclass") + + def fan(self, block, input_list): + chan = channel.channel() + results = [] + exceptional_results = 0 + for index, input_item in enumerate(input_list): + pool_item = self.get() + + ## Fan out + api.spawn( + self._invoke, block, pool_item, input_item, index, chan) + + ## Fan back in + for i in range(len(input_list)): + ## Wait for all guys to send to the queue + index, value = chan.receive() + if isinstance(value, Exception): + exceptional_results += 1 + results.append((index, value)) + + results.sort() + results = [value for index, value in results] + + if exceptional_results: + if exceptional_results == len(results): + raise AllFailed(results) + raise SomeFailed(results) + return results + + def _invoke(self, block, pool_item, input_item, index, chan): + try: + result = block(pool_item, input_item) + except Exception, e: + self.put(pool_item) + chan.send((index, e)) + return + self.put(pool_item) + chan.send((index, result)) + + +class Token(object): + pass + + +class TokenPool(Pool): + """A pool which gives out tokens, an object indicating that + the person who holds the token has a right to consume some + limited resource. + """ + def create(self): + return Token() + + +class ConnectionPool(Pool): + """A Pool which can limit the number of concurrent http operations + being made to a given server. + + *NOTE: *TODO: + + This does NOT currently keep sockets open. It discards the created + http object when it is put back in the pool. This is because we do + not yet have a combination of http clients and servers which can work + together to do HTTP keepalive sockets without errors. + """ + def __init__(self, proto, netloc, use_proxy, min_size=0, max_size=4): + self.proto = proto + self.netloc = netloc + self.use_proxy = use_proxy + Pool.__init__(self, min_size, max_size) + + def create(self): + return httpc.make_connection(self.proto, self.netloc, self.use_proxy) + + def put(self, item): + ## Discard item, create a new connection for the pool + Pool.put(self, self.create()) diff --git a/eventlet/pools_test.py b/eventlet/pools_test.py new file mode 100644 index 0000000..e604bc1 --- /dev/null +++ b/eventlet/pools_test.py @@ -0,0 +1,179 @@ +"""\ +@file test_pools.py +@author Donovan Preston, Aaron Brashears + +Copyright (c) 2006-2007, Linden Research, Inc. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import unittest + +from eventlet import api +from eventlet import channel +from eventlet import pools + + +class IntPool(pools.Pool): + def create(self): + self.current_integer = getattr(self, 'current_integer', 0) + 1 + return self.current_integer + + +class TestIntPool(unittest.TestCase): + mode = 'static' + def setUp(self): + self.pool = IntPool(min_size=0, max_size=4) + + def test_integers(self): + # Do not actually use this pattern in your code. The pool will be + # exhausted, and unrestoreable. + # If you do a get, you should ALWAYS do a put, probably like this: + # try: + # thing = self.pool.get() + # # do stuff + # finally: + # self.pool.put(thing) + + # with self.pool.some_api_name() as thing: + # # do stuff + self.assertEquals(self.pool.get(), 1) + self.assertEquals(self.pool.get(), 2) + self.assertEquals(self.pool.get(), 3) + self.assertEquals(self.pool.get(), 4) + + def test_free(self): + self.assertEquals(self.pool.free(), 4) + gotten = self.pool.get() + self.assertEquals(self.pool.free(), 3) + self.pool.put(gotten) + self.assertEquals(self.pool.free(), 4) + + def test_exhaustion(self): + waiter = channel.channel() + def consumer(): + gotten = None + try: + gotten = self.pool.get() + finally: + waiter.send(gotten) + + api.spawn(consumer) + + one, two, three, four = ( + self.pool.get(), self.pool.get(), self.pool.get(), self.pool.get()) + self.assertEquals(self.pool.free(), 0) + + # Let consumer run; nothing will be in the pool, so he will wait + api.sleep(0) + + # Wake consumer + self.pool.put(one) + + # wait for the consumer + self.assertEquals(waiter.receive(), one) + + def test_blocks_on_pool(self): + waiter = channel.channel() + def greedy(): + self.pool.get() + self.pool.get() + self.pool.get() + self.pool.get() + # No one should be waiting yet. + self.assertEquals(self.pool.waiting(), 0) + # The call to the next get will unschedule this routine. + self.pool.get() + # So this send should never be called. + waiter.send('Failed!') + + killable = api.spawn(greedy) + + # no one should be waiting yet. + self.assertEquals(self.pool.waiting(), 0) + + ## Wait for greedy + api.sleep(0) + + ## Greedy should be blocking on the last get + self.assertEquals(self.pool.waiting(), 1) + + ## Send will never be called, so balance should be 0. 
+ self.assertEquals(waiter.balance, 0) + + api.kill(killable) + + +class TestAbstract(unittest.TestCase): + mode = 'static' + def test_abstract(self): + ## Going for 100% coverage here + ## A Pool cannot be used without overriding create() + pool = pools.Pool() + self.assertRaises(NotImplementedError, pool.get) + + +class TestIntPool2(unittest.TestCase): + mode = 'static' + def setUp(self): + self.pool = IntPool(min_size=3, max_size=3) + + def test_something(self): + self.assertEquals(len(self.pool.free_items), 3) + ## Cover the clause in get where we get from the free list instead of creating + ## an item on get + gotten = self.pool.get() + self.assertEquals(gotten, 1) + + +ALWAYS = RuntimeError('I always fail') +SOMETIMES = RuntimeError('I fail half the time') + + +class TestFan(unittest.TestCase): + mode = 'static' + def setUp(self): + self.pool = IntPool(max_size=2) + + def test_with_list(self): + list_of_input = ['agent-one', 'agent-two', 'agent-three'] + + def my_callable(pool_item, next_thing): + ## Do some "blocking" (yielding) thing + api.sleep(0.01) + return next_thing + + output = self.pool.fan(my_callable, list_of_input) + self.assertEquals(list_of_input, output) + + def test_all_fail(self): + def my_failure(pool_item, next_thing): + raise ALWAYS + self.assertRaises(pools.AllFailed, self.pool.fan, my_failure, range(4)) + + def test_some_fail(self): + def my_failing_callable(pool_item, next_thing): + if next_thing % 2: + raise SOMETIMES + return next_thing + self.assertRaises(pools.SomeFailed, self.pool.fan, my_failing_callable, range(4)) + + +if __name__ == '__main__': + unittest.main() + diff --git a/eventlet/processes.py b/eventlet/processes.py new file mode 100644 index 0000000..8c9aea0 --- /dev/null +++ b/eventlet/processes.py @@ -0,0 +1,138 @@ +"""\ +@file processes.py + +Copyright (c) 2006-2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
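+
+A short usage sketch (the same pattern exercised in processes_test.py):
+
+    pool = ProcessPool('echo', ['hello'])
+    proc = pool.get()
+    try:
+        output = proc.read()     # 'hello\n'
+    finally:
+        pool.put(proc)
+
+Reading from or writing to a process whose child has exited raises DeadProcess;
+dead processes are not returned to the pool, so a fresh one is created on the
+next get().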
+""" + + +import os +import popen2 +import signal + + +from eventlet import util, pools +from eventlet import wrappedfd + +class DeadProcess(RuntimeError): + pass + + +class Process(object): + process_number = 0 + def __init__(self, command, args, dead_callback=lambda:None): + self.process_number = self.process_number + 1 + Process.process_number = self.process_number + self.command = command + self.args = args + self._dead_callback = dead_callback + self.dead = False + self.started = False + self.popen4 = None + + ## We use popen4 so that read() will read from either stdout or stderr + self.popen4 = popen2.Popen4([self.command] + self.args) + child_stdout_stderr = self.popen4.fromchild + child_stdin = self.popen4.tochild + util.set_nonblocking(child_stdout_stderr) + util.set_nonblocking(child_stdin) + self.child_stdout_stderr = wrappedfd.wrapped_file(child_stdout_stderr) + self.child_stdout_stderr.newlines = '\n' # the default is \r\n, which aren't sent over pipes + self.child_stdin = wrappedfd.wrapped_file(child_stdin) + self.child_stdin.newlines = '\n' + + self.sendall = self.child_stdin.write + self.send = self.child_stdin.write + self.recv = self.child_stdout_stderr.read + self.readline = self.child_stdout_stderr.readline + + def dead_callback(self): + self.dead = True + if self._dead_callback: + self._dead_callback() + + def makefile(self, mode, *arg): + if mode.startswith('r'): + return self.child_stdout_stderr + if mode.startswith('w'): + return self.child_stdin + raise RuntimeError("Unknown mode", mode) + + def read(self, amount=None): + result = self.child_stdout_stderr.read(amount) + if result == '': + # This process is dead. + self.dead_callback() + raise DeadProcess + return result + + def write(self, stuff): + written = self.child_stdin.send(stuff) + try: + self.child_stdin.flush() + except ValueError, e: + ## File was closed + assert str(e) == 'I/O operation on closed file' + if written == 0: + self.dead_callback() + raise DeadProcess + + def flush(self): + self.child_stdin.flush() + + def close(self): + self.child_stdout_stderr.close() + self.child_stdin.close() + self.dead_callback() + + def close_stdin(self): + self.child_stdin.close() + + def kill(self, sig=None): + if sig == None: + sig = signal.SIGTERM + os.kill(self.popen4.pid, sig) + + def getpid(self): + return self.popen4.pid + + +class ProcessPool(pools.Pool): + def __init__(self, command, args=None, min_size=0, max_size=4): + """@param command the command to run + """ + self.command = command + if args is None: + args = [] + self.args = args + pools.Pool.__init__(self, min_size, max_size) + + def create(self): + """Generate a process + """ + def dead_callback(): + self.current_size -= 1 + return Process(self.command, self.args, dead_callback) + + def put(self, item): + if not item.dead: + if item.popen4.poll() != -1: + item.dead_callback() + else: + pools.Pool.put(self, item) diff --git a/eventlet/processes_test.py b/eventlet/processes_test.py new file mode 100644 index 0000000..2cbab7d --- /dev/null +++ b/eventlet/processes_test.py @@ -0,0 +1,134 @@ +"""\ +@file processes_test.py +@author Donovan Preston, Aaron Brashears + +Copyright (c) 2006-2007, Linden Research, Inc. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" +from eventlet import tests +from eventlet import api +from eventlet import channel +from eventlet import processes + +class TestEchoPool(tests.TestCase): + mode = 'static' + def setUp(self): + self.pool = processes.ProcessPool('echo', ["hello"]) + + def test_echo(self): + result = None + + proc = self.pool.get() + try: + result = proc.read() + finally: + self.pool.put(proc) + self.assertEquals(result, 'hello\n') + + def test_read_eof(self): + proc = self.pool.get() + try: + proc.read() + self.assertRaises(processes.DeadProcess, proc.read) + finally: + self.pool.put(proc) + + +class TestCatPool(tests.TestCase): + mode = 'static' + def setUp(self): + self.pool = processes.ProcessPool('cat') + + def test_cat(self): + result = None + + proc = self.pool.get() + try: + proc.write('goodbye') + proc.close_stdin() + result = proc.read() + finally: + self.pool.put(proc) + + self.assertEquals(result, 'goodbye') + + def test_write_to_dead(self): + result = None + + proc = self.pool.get() + try: + proc.write('goodbye') + proc.close_stdin() + result = proc.read() + self.assertRaises(processes.DeadProcess, proc.write, 'foo') + finally: + self.pool.put(proc) + + def test_close(self): + result = None + + proc = self.pool.get() + try: + proc.write('hello') + proc.close() + self.assertRaises(processes.DeadProcess, proc.write, 'goodbye') + finally: + self.pool.put(proc) + + +class TestDyingProcessesLeavePool(tests.TestCase): + mode = 'static' + def setUp(self): + self.pool = processes.ProcessPool('echo', ['hello'], max_size=1) + + def test_dead_process_not_inserted_into_pool(self): + proc = self.pool.get() + try: + result = proc.read() + self.assertEquals(result, 'hello\n') + finally: + self.pool.put(proc) + proc2 = self.pool.get() + self.assert_(proc is not proc2) + + +class TestProcessLivesForever(tests.TestCase): + mode = 'static' + def setUp(self): + self.pool = processes.ProcessPool('yes', max_size=1) + + def test_reading_twice_from_same_process(self): + proc = self.pool.get() + try: + result = proc.read(2) + self.assertEquals(result, 'y\n') + finally: + self.pool.put(proc) + + proc2 = self.pool.get() + self.assert_(proc is proc2) + try: + result = proc2.read(2) + self.assertEquals(result, 'y\n') + finally: + self.pool.put(proc2) + + +if __name__ == '__main__': + tests.main() diff --git a/eventlet/pylibsupport.py b/eventlet/pylibsupport.py new file mode 100644 index 0000000..1378e7a --- /dev/null +++ b/eventlet/pylibsupport.py @@ -0,0 +1,42 @@ +"""\ +@file pylibsupport.py 
+@author Donovan Preston + +Copyright (c) 2005-2006, Donovan Preston +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + + +from py.magic import greenlet + + +import sys +import types + + +def emulate(): + module = types.ModuleType('greenlet') + sys.modules['greenlet'] = module + module.greenlet = greenlet + module.getcurrent = greenlet.getcurrent + module.GreenletExit = greenlet.GreenletExit + + + diff --git a/eventlet/runloop.py b/eventlet/runloop.py new file mode 100644 index 0000000..252e3c9 --- /dev/null +++ b/eventlet/runloop.py @@ -0,0 +1,200 @@ +"""\ +@file runloop.py +@author Bob Ippolito + +Defines the core eventlet runloop. The runloop keeps track of scheduled +events and observers which watch for specific portions of the runloop to +be executed. + +Copyright (c) 2005-2006, Bob Ippolito +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
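+
+A minimal usage sketch (the same flow exercised by test_empty in
+runloop_test.py):
+
+    loop = RunLoop()
+    loop.schedule_call(0, loop.abort)   # fires on the first pass through the loop
+    loop.run()                          # returns once abort() has been processed
+
+Observers registered via add_observer() are notified at the 'entry',
+'before_timers', 'before_waiting', 'after_waiting' and 'exit' phases.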
+""" + +import time +import bisect +import sys +import traceback + +from eventlet.timer import Timer + + +class RunLoop(object): + SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit) + + def __init__(self, wait=None, clock=None): + if clock is None: + clock = self.default_clock() + self.clock = clock + if wait is None: + wait = self.default_wait + self.wait = wait + self.stopping = False + self.running = False + self.timers = [] + self.next_timers = [] + self.observers = {} + self.observer_modes = { + 'entry': [], + 'before_timers': [], + 'before_waiting': [], + 'after_waiting': [], + 'exit': [], + } + + def default_wait(self, time): + return None + + def default_clock(self): + return time.time + + def default_sleep(self): + return 60.0 + + def sleep_until(self): + t = self.timers + if not t: + return None + return t[0][0] + + def run(self): + """Run the runloop until abort is called. + """ + if self.running: + raise RuntimeError("Already running!") + try: + self.running = True + self.stopping = False + self.fire_observers('entry') + while not self.stopping: + self.prepare_timers() + self.fire_observers('before_timers') + self.fire_timers(self.clock()) + self.prepare_timers() + wakeup_when = self.sleep_until() + if wakeup_when is None: + sleep_time = self.default_sleep() + else: + sleep_time = wakeup_when - self.clock() + if sleep_time > 0: + self.fire_observers('before_waiting') + self.wait(sleep_time) + self.fire_observers('after_waiting') + else: + self.wait(0) + else: + del self.timers[:] + del self.next_timers[:] + self.fire_observers('exit') + finally: + self.running = False + self.stopping = False + + def abort(self): + """Stop the runloop. If run is executing, it will exit after completing + the next runloop iteration. + """ + if self.running: + self.stopping = True + + def add_observer(self, observer, *modes): + """Add an event observer to this runloop with the given modes. + Valid modes are: + entry: The runloop is being entered. + before_timers: Before the expired timers for this iteration are executed. + before_waiting: Before waiting for the calculated wait_time + where nothing will happen. + after_waiting: After waiting, immediately before starting the top of the + runloop again. + exit: The runloop is exiting. + + If no mode is passed or mode is all, the observer will be fired for every + event type. + """ + if not modes or modes == ('all',): + modes = tuple(self.observer_modes) + self.observers[observer] = modes + for mode in modes: + self.observer_modes[mode].append(observer) + + def remove_observer(self, observer): + """Remove a previously registered observer from all event types. 
+ """ + for mode in self.observers.pop(observer, ()): + self.observer_modes[mode].remove(observer) + + def squelch_observer_exception(self, observer, exc_info): + traceback.print_exception(*exc_info) + print >>sys.stderr, "Removing observer: %r" % (observer,) + self.remove_observer(observer) + + def fire_observers(self, activity): + for observer in self.observer_modes[activity]: + try: + observer(self, activity) + except self.SYSTEM_EXCEPTIONS: + raise + except: + self.squelch_observer_exception(observer, sys.exc_info()) + + def squelch_timer_exception(self, timer, exc_info): + traceback.print_exception(*exc_info) + print >>sys.stderr, "Timer raised: %r" % (timer,) + + def _add_absolute_timer(self, when, info): + # the 0 placeholder makes it easy to bisect_right using (now, 1) + self.next_timers.append((when, 0, info)) + + def add_timer(self, timer): + scheduled_time = self.clock() + timer.seconds + self._add_absolute_timer(scheduled_time, timer) + return scheduled_time + + def prepare_timers(self): + ins = bisect.insort_right + t = self.timers + for item in self.next_timers: + ins(t, item) + del self.next_timers[:] + + def schedule_call(self, seconds, cb, *args, **kw): + """Schedule a callable to be called after 'seconds' seconds have + elapsed. + seconds: The number of seconds to wait. + cb: The callable to call after the given time. + *args: Arguments to pass to the callable when called. + **kw: Keyword arguments to pass to the callable when called. + """ + t = Timer(seconds, cb, *args, **kw) + self.add_timer(t) + return t + + def fire_timers(self, when): + t = self.timers + last = bisect.bisect_right(t, (when, 1)) + i = 0 + for i in xrange(last): + timer = t[i][2] + try: + timer() + except self.SYSTEM_EXCEPTIONS: + raise + except: + self.squelch_timer_exception(timer, sys.exc_info()) + del t[:last] diff --git a/eventlet/runloop_test.py b/eventlet/runloop_test.py new file mode 100644 index 0000000..90e7293 --- /dev/null +++ b/eventlet/runloop_test.py @@ -0,0 +1,157 @@ +"""\ +@file test_runloop.py +@author Donovan Preston + +Copyright (c) 2006-2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +import sys +import time +import StringIO +import unittest + +from eventlet import runloop + + +class TestRunloop(unittest.TestCase): + mode = 'static' + def test_empty(self): + r = runloop.RunLoop() + r.schedule_call(0, r.abort) + r.run() + assert not r.running + + + def test_timer(self): + r = runloop.RunLoop() + r.schedule_call(0.125, r.abort) + start_time = time.time() + r.run() + assert time.time() - start_time >= 0.125 + assert not r.running + + def test_observer(self): + observed = [] + r = runloop.RunLoop() + r.add_observer(lambda runloop, activity: observed.append(activity)) + r.schedule_call(0, r.abort) + r.run() + assert observed == ['entry', 'before_timers', 'before_waiting', 'after_waiting', 'exit'] + assert not r.running + + + def test_remove_observer(self): + r = runloop.RunLoop() + + observed = [] + def observe(runloop, mode): + observed.append(mode) + r.remove_observer(observe) + + looped = [] + def run_loop_twice(runloop, mode): + if looped: + r.abort() + else: + looped.append(True) + + r.add_observer(observe, 'before_timers') + r.add_observer(run_loop_twice, 'after_waiting') + r.run() + assert len(observed) == 1 + assert not r.running + + def test_observer_exception(self): + r = runloop.RunLoop() + + observed = [] + def observe(runloop, mode): + observed.append(mode) + raise Exception("Squelch me please") + + looped = [] + def run_loop_twice(runloop, mode): + if looped: + r.abort() + else: + looped.append(True) + + saved = sys.stderr + sys.stderr = err = StringIO.StringIO() + + r.add_observer(observe, 'before_timers') + r.add_observer(run_loop_twice, 'after_waiting') + r.run() + + err.seek(0) + sys.stderr = saved + + assert len(observed) == 1 + assert err.read() + assert not r.running + + def test_timer_exception(self): + r = runloop.RunLoop() + + observed = [] + def timer(): + observed.append(True) + raise Exception("Squelch me please") + + looped = [] + def run_loop_twice(runloop, mode): + if looped: + r.abort() + else: + looped.append(True) + + saved = sys.stderr + sys.stderr = err = StringIO.StringIO() + + r.schedule_call(0, timer) + r.add_observer(run_loop_twice, 'after_waiting') + r.run() + + err.seek(0) + sys.stderr = saved + + assert len(observed) == 1 + assert err.read() + assert not r.running + + def test_timer_system_exception(self): + r = runloop.RunLoop() + def timer(): + raise SystemExit + + r.schedule_call(0, timer) + + caught = [] + try: + r.run() + except SystemExit: + caught.append(True) + + assert caught + assert not r.running + +if __name__ == '__main__': + unittest.main() + diff --git a/eventlet/selecthub.py b/eventlet/selecthub.py new file mode 100644 index 0000000..e9b1e41 --- /dev/null +++ b/eventlet/selecthub.py @@ -0,0 +1,173 @@ +"""\ +@file selecthub.py +@author Bob Ippolito + +Copyright (c) 2005-2006, Bob Ippolito +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import sys +import select +import errno +import traceback +import time +from bisect import insort, bisect_left + +from eventlet import greenlib +from eventlet.runloop import RunLoop, Timer + +import greenlet + +class Hub(object): + def __init__(self): + self.runloop = RunLoop(self.wait) + self.readers = {} + self.writers = {} + self.excs = {} + self.descriptors = {} + self.descriptor_queue = {} + self.greenlet = None + + def stop(self): + self.process_queue() + self.runloop.abort() + if self.greenlet is not greenlet.getcurrent(): + self.switch() + + def schedule_call(self, *args, **kw): + return self.runloop.schedule_call(*args, **kw) + + def switch(self): + if not self.greenlet: + self.greenlet = greenlib.tracked_greenlet() + args = ((self.runloop.run,),) + else: + args = () + try: + greenlet.getcurrent().parent = self.greenlet + except ValueError: + pass + return greenlib.switch(self.greenlet, *args) + + def add_descriptor(self, fileno, read=None, write=None, exc=None): + self.descriptor_queue[fileno] = read, write, exc + + def remove_descriptor(self, fileno): + self.descriptor_queue[fileno] = None, None, None + + def exc_descriptor(self, fileno): + # We must handle two cases here, the descriptor + # may be changing or removing its exc handler + # in the queue, or it may be waiting on the queue. + exc = None + try: + exc = self.descriptor_queue[fileno][2] + except KeyError: + try: + exc = self.excs[fileno] + except KeyError: + pass + if exc is not None: + try: + exc() + except self.runloop.SYSTEM_EXCEPTIONS: + self.squelch_exception(fileno, sys.exc_info()) + + def squelch_exception(self, fileno, exc_info): + traceback.print_exception(*exc_info) + print >>sys.stderr, "Removing descriptor: %r" % (fileno,) + try: + self.remove_descriptor(fileno) + except Exception, e: + print >>sys.stderr, "Exception while removing descriptor! 
%r" % (e,) + + def process_queue(self): + readers = self.readers + writers = self.writers + excs = self.excs + descriptors = self.descriptors + for fileno, rwe in self.descriptor_queue.iteritems(): + read, write, exc = rwe + if read is None and write is None and exc is None: + try: + del descriptors[fileno] + except KeyError: + continue + try: + del readers[fileno] + except KeyError: + pass + try: + del writers[fileno] + except KeyError: + pass + try: + del excs[fileno] + except KeyError: + pass + else: + if read is not None: + readers[fileno] = read + else: + try: + del readers[fileno] + except KeyError: + pass + if write is not None: + writers[fileno] = write + else: + try: + del writers[fileno] + except KeyError: + pass + if exc is not None: + excs[fileno] = exc + else: + try: + del excs[fileno] + except KeyError: + pass + descriptors[fileno] = rwe + self.descriptor_queue.clear() + + def wait(self, seconds=None): + self.process_queue() + if not self.descriptors: + if seconds: + time.sleep(seconds) + return + readers = self.readers + writers = self.writers + excs = self.excs + try: + r, w, ig = select.select(readers, writers, [], seconds) + except select.error, e: + if e.args[0] == errno.EINTR: + return + raise + SYSTEM_EXCEPTIONS = self.runloop.SYSTEM_EXCEPTIONS + for observed, events in ((readers, r), (writers, w)): + for fileno in events: + try: + observed[fileno](fileno) + except SYSTEM_EXCEPTIONS: + raise + except: + self.squelch_exception(fileno, sys.exc_info()) diff --git a/eventlet/stacklesssupport.py b/eventlet/stacklesssupport.py new file mode 100644 index 0000000..8e97555 --- /dev/null +++ b/eventlet/stacklesssupport.py @@ -0,0 +1,110 @@ +"""\ +@file __init__.py +@brief Support for using stackless python. Broken and riddled with +print statements at the moment. Please fix it! + +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +import sys +import types + +import stackless +import traceback + + +caller = None + + +coro_args = {} + + +tasklet_to_greenlet = {} + + +def getcurrent(): + return tasklet_to_greenlet[stackless.getcurrent()] + + +class FirstSwitch(object): + def __init__(self, gr): + self.gr = gr + + def __call__(self, *args, **kw): + print "first call", args, kw + gr = self.gr + del gr.switch + run, gr.run = gr.run, None + t = stackless.tasklet(run) + gr.t = t + tasklet_to_greenlet[t] = gr + t.setup(*args, **kw) + result = t.run() + + +class greenlet(object): + def __init__(self, run=None, parent=None): + self.dead = False + if parent is None: + parent = getcurrent() + + self.parent = parent + if run is not None: + self.run = run + + self.switch = FirstSwitch(self) + + def switch(self, *args): + print "switch", args + global caller + caller = stackless.getcurrent() + coro_args[self] = args + self.t.insert() + stackless.schedule() + if caller is not self.t: + caller.remove() + rval = coro_args[self] + return rval + + def run(self): + pass + + def __bool__(self): + return self.run is None and not self.dead + + +class GreenletExit(Exception): + pass + + +def emulate(): + module = types.ModuleType('greenlet') + sys.modules['greenlet'] = module + module.greenlet = greenlet + module.getcurrent = getcurrent + module.GreenletExit = GreenletExit + + caller = t = stackless.getcurrent() + tasklet_to_greenlet[t] = None + main_coro = greenlet() + tasklet_to_greenlet[t] = main_coro + main_coro.t = t + del main_coro.switch ## It's already running + coro_args[main_coro] = None diff --git a/eventlet/tests.py b/eventlet/tests.py new file mode 100644 index 0000000..971509a --- /dev/null +++ b/eventlet/tests.py @@ -0,0 +1,36 @@ +"""\ +@file tests.py +@author Donovan Preston +@brief Indirection layer for tests in case we want to fix unittest. + +Copyright (c) 2006-2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import atexit +import sys +import unittest + + +TestCase = unittest.TestCase + + +name = getattr(sys.modules['__main__'], '__name__', None) + +main = unittest.main diff --git a/eventlet/timer.py b/eventlet/timer.py new file mode 100644 index 0000000..2613a61 --- /dev/null +++ b/eventlet/timer.py @@ -0,0 +1,72 @@ +"""\ +@file timer.py +@author Bob Ippolito + +Copyright (c) 2005-2006, Bob Ippolito +Copyright (c) 2007, Linden Research, Inc. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" +from eventlet.api import get_hub + +class Timer(object): + __slots__ = ['seconds', 'tpl', 'called', 'cancelled', 'scheduled_time'] + def __init__(self, seconds, cb, *args, **kw): + """Create a timer. + seconds: The minimum number of seconds to wait before calling + cb: The callback to call when the timer has expired + *args: The arguments to pass to cb + **kw: The keyword arguments to pass to cb + + This timer will not be run unless it is scheduled in a runloop by + calling timer.schedule() or runloop.add_timer(timer). + """ + self.cancelled = False + self.seconds = seconds + self.tpl = cb, args, kw + self.called = False + + def __repr__(self): + secs = getattr(self, 'seconds', None) + cb, args, kw = getattr(self, 'tpl', (None, None, None)) + return "Timer(%s, %s, *%s, **%s)" % ( + secs, cb, args, kw) + + def copy(self): + cb, args, kw = self.tpl + return self.__class__(self.seconds, cb, *args, **kw) + + def schedule(self): + """Schedule this timer to run in the current runloop. + """ + self.called = False + self.scheduled_time = get_hub().runloop.add_timer(self) + return self + + def __call__(self): + if not self.called: + self.called = True + cb, args, kw = self.tpl + cb(*args, **kw) + + def cancel(self): + """Prevent this timer from being called. If the timer has already + been called, has no effect. + """ + self.cancelled = True + self.called = True diff --git a/eventlet/timer_test.py b/eventlet/timer_test.py new file mode 100644 index 0000000..f9a1e22 --- /dev/null +++ b/eventlet/timer_test.py @@ -0,0 +1,66 @@ +"""\ +@file test_timer.py +@author Donovan Preston + +Copyright (c) 2006-2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import unittest + +from eventlet import api, runloop, tests, timer + +class TestTimer(tests.TestCase): + mode = 'static' + def test_copy(self): + t = timer.Timer(0, lambda: None) + t2 = t.copy() + assert t.seconds == t2.seconds + assert t.tpl == t2.tpl + assert t.called == t2.called + + def test_cancel(self): + r = runloop.RunLoop() + called = [] + t = timer.Timer(0, lambda: called.append(True)) + t.cancel() + r.add_timer(t) + r.add_observer(lambda r, activity: r.abort(), 'after_waiting') + r.run() + assert not called + assert not r.running + + def test_schedule(self): + hub = api.get_hub() + r = hub.runloop + # clean up the runloop, preventing side effects from previous tests + # on this thread + if r.running: + r.abort() + api.sleep(0) + called = [] + t = timer.Timer(0, lambda: (called.append(True), hub.runloop.abort())) + t.schedule() + r.default_sleep = lambda: 0.0 + r.run() + assert called + assert not r.running + +if __name__ == '__main__': + unittest.main() diff --git a/eventlet/tls.py b/eventlet/tls.py new file mode 100644 index 0000000..3cb3921 --- /dev/null +++ b/eventlet/tls.py @@ -0,0 +1,57 @@ +"""\ +@file tls.py +@author Donovan Preston + +Copyright (c) 2006-2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +import threading +import weakref + +__all__ = ['local'] + +class _local(object): + """ + Crappy Python 2.3 compatible implementation of thread-local storage + """ + + __slots__ = ('__thread_dict__',) + + def __init__(self): + object.__setattr__(self, '__thread_dict__', weakref.WeakKeyDictionary()) + + def __getattr__(self, attr): + try: + return self.__thread_dict__[threading.currentThread()][attr] + except KeyError: + raise AttributeError(attr) + + def __setattr__(self, attr, value): + t = threading.currentThread() + try: + d = self.__thread_dict__[t] + except KeyError: + d = self.__thread_dict__[t] = {} + d[attr] = value + +try: + local = threading.local +except AttributeError: + local = _local diff --git a/eventlet/twistedsupport.py b/eventlet/twistedsupport.py new file mode 100644 index 0000000..5bbbc8b --- /dev/null +++ b/eventlet/twistedsupport.py @@ -0,0 +1,134 @@ +"""\ +@file twistedsupport.py +@author Donovan Preston + +Copyright (c) 2005-2006, Donovan Preston +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import sys +import traceback + +from eventlet import api +from eventlet import timer + +from twisted.internet import posixbase +from twisted.internet.interfaces import IReactorFDSet + +try: + from zope.interface import implements + _working = True +except ImportError: + _working = False + def implements(*args, **kw): + pass + + +class TwistedTimer(object): + def __init__(self, timer): + self.timer = timer + + def cancel(self): + self.timer.cancel() + + def getTime(self): + return self.timer.scheduled_time + + def delay(self, seconds): + hub = api.get_hub() + new_time = hub.clock() - self.timer_scheduled_time + seconds + self.timer.cancel() + cb, args, kw = self.timer.tpl + self.timer = hub.schedule_call(new_time, cb, *args, **kw) + + def reset(self, new_time): + self.timer.cancel() + cb, args, kw = self.timer.tpl + self.timer = api.get_hub().schedule_call(new_time, cb, *args, **kw) + + def active(self): + return not self.timer.called + + +class EventletReactor(posixbase.PosixReactorBase): + implements(IReactorFDSet) + + def __init__(self, *args, **kw): + self._readers = {} + self._writers = {} + posixbase.PosixReactorBase.__init__(self, *args, **kw) + + def callLater(self, func, *args, **kw): + return TwistedTimer(api.call_after(func, *args, **kw)) + + def run(self): + self.running = True + self._stopper = api.call_after(sys.maxint / 1000.0, lambda: None) + ## schedule a call way in the future, and cancel it in stop? 
+ api.get_hub().runloop.run() + + def stop(self): + self._stopper.cancel() + posixbase.PosixReactorBase.stop(self) + api.get_hub().remove_descriptor(self._readers.keys()[0]) + api.get_hub().runloop.abort() + + def addReader(self, reader): + fileno = reader.fileno() + self._readers[fileno] = reader + api.get_hub().add_descriptor(fileno, read=self._got_read) + + def _got_read(self, fileno): + self._readers[fileno].doRead() + + def addWriter(self, writer): + fileno = writer.fileno() + self._writers[fileno] = writer + api.get_hub().add_descriptor(fileno, write=self._got_write) + + def _got_write(self, fileno): + self._writers[fileno].doWrite() + + def removeReader(self, reader): + fileno = reader.fileno() + if fileno in self._readers: + self._readers.pop(fileno) + api.get_hub().remove_descriptor(fileno) + + def removeWriter(self, writer): + fileno = writer.fileno() + if fileno in self._writers: + self._writers.pop(fileno) + api.get_hub().remove_descriptor(fileno) + + def removeAll(self): + return self._removeAll(self._readers.values(), self._writers.values()) + + +def emulate(): + if not _working: + raise RuntimeError, "Can't use twistedsupport because zope.interface is not installed." + reactor = EventletReactor() + from twisted.internet.main import installReactor + installReactor(reactor) + + +__all__ = ['emulate'] + diff --git a/eventlet/util.py b/eventlet/util.py new file mode 100644 index 0000000..657716c --- /dev/null +++ b/eventlet/util.py @@ -0,0 +1,214 @@ +"""\ +@file util.py +@author Bob Ippolito + +Copyright (c) 2005-2006, Bob Ippolito +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +import os +import fcntl +import socket +import errno +from errno import EWOULDBLOCK, EAGAIN + +try: + from OpenSSL import SSL +except ImportError: + class SSL(object): + class WantWriteError(object): + pass + + class WantReadError(object): + pass + + class ZeroReturnError(object): + pass + + class SysCallError(object): + pass + + +def g_log(*args): + import sys + import greenlet + from eventlet.greenlib import greenlet_id + g_id = greenlet_id() + if g_id is None: + if greenlet.getcurrent().parent is None: + ident = 'greenlet-main' + else: + g_id = id(greenlet.getcurrent()) + if g_id < 0: + g_id += 1 + ((sys.maxint + 1) << 1) + ident = '%08X' % (g_id,) + else: + ident = 'greenlet-%d' % (g_id,) + print >>sys.stderr, '[%s] %s' % (ident, ' '.join(map(str, args))) + +CONNECT_ERR = (errno.EINPROGRESS, errno.EALREADY, EWOULDBLOCK) +CONNECT_SUCCESS = (0, errno.EISCONN) +def socket_connect(descriptor, address): + err = descriptor.connect_ex(address) + if err in CONNECT_ERR: + return None + if err not in CONNECT_SUCCESS: + raise socket.error(err, errno.errorcode[err]) + return descriptor + +__original_socket__ = socket.socket + +def tcp_socket(): + s = __original_socket__(socket.AF_INET, socket.SOCK_STREAM) + set_nonblocking(s) + return s + + +__original_ssl__ = socket.ssl + + +def wrap_ssl(sock, certificate=None, private_key=None): + from OpenSSL import SSL + from eventlet import wrappedfd, util + context = SSL.Context(SSL.SSLv23_METHOD) + print certificate, private_key + if certificate is not None: + context.use_certificate_file(certificate) + if private_key is not None: + context.use_privatekey_file(private_key) + context.set_verify(SSL.VERIFY_NONE, lambda *x: True) + + ## TODO only do this on client sockets? how? + connection = SSL.Connection(context, sock) + connection.set_connect_state() + return wrappedfd.wrapped_fd(connection) + + +def wrap_socket_with_coroutine_socket(): + def new_socket(*args, **kw): + from eventlet import wrappedfd + s = __original_socket__(*args, **kw) + set_nonblocking(s) + return wrappedfd.wrapped_fd(s) + socket.socket = new_socket + + socket.ssl = wrap_ssl + + +def socket_bind_and_listen(descriptor, addr=('', 0), backlog=50): + set_reuse_addr(descriptor) + descriptor.bind(addr) + descriptor.listen(backlog) + return descriptor + +def socket_accept(descriptor): + try: + return descriptor.accept() + except socket.error, e: + if e[0] == EWOULDBLOCK: + return None + raise + +def socket_send(descriptor, data): + try: + return descriptor.send(data) + except socket.error, e: + if e[0] == EWOULDBLOCK: + return 0 + raise + except SSL.WantWriteError: + return 0 + except SSL.WantReadError: + return 0 + + +# winsock sometimes throws ENOTCONN +SOCKET_CLOSED = (errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN) +def socket_recv(descriptor, buflen): + try: + return descriptor.recv(buflen) + except socket.error, e: + if e[0] == EWOULDBLOCK: + return None + if e[0] in SOCKET_CLOSED: + return '' + raise + except SSL.WantReadError: + return None + except SSL.ZeroReturnError: + return '' + except SSL.SysCallError, e: + if e[0] == -1 or e[0] > 0: + raise socket.error(errno.ECONNRESET, errno.errorcode[errno.ECONNRESET]) + raise + + +def file_recv(fd, buflen): + try: + return fd.read(buflen) + except IOError, e: + if e[0] == EAGAIN: + return None + return '' + except socket.error, e: + if e[0] == errno.EPIPE: + return '' + raise + + +def file_send(fd, data): + try: + fd.write(data) + fd.flush() + return len(data) + except IOError, e: + if e[0] == EAGAIN: + return 0 + except 
ValueError, e: + written = 0 + except socket.error, e: + if e[0] == errno.EPIPE: + written = 0 + + +def set_reuse_addr(descriptor): + try: + descriptor.setsockopt( + socket.SOL_SOCKET, + socket.SO_REUSEADDR, + descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1, + ) + except socket.error: + pass + +def set_nonblocking(descriptor): + if hasattr(descriptor, 'setblocking'): + # socket + descriptor.setblocking(0) + else: + # fd + if hasattr(descriptor, 'fileno'): + fd = descriptor.fileno() + else: + fd = descriptor + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) + return descriptor + diff --git a/eventlet/wrappedfd.py b/eventlet/wrappedfd.py new file mode 100644 index 0000000..32b0126 --- /dev/null +++ b/eventlet/wrappedfd.py @@ -0,0 +1,262 @@ +"""\ +@file wrappedfd.py +@author Bob Ippolito + +Copyright (c) 2005-2006, Bob Ippolito +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" +from eventlet.api import trampoline, get_hub +from eventlet import util + +BUFFER_SIZE = 4096 + +import socket, errno + + +def higher_order_recv(recv_func): + def recv(self, buflen): + buf = self.recvbuffer + if buf: + chunk, self.recvbuffer = buf[:buflen], buf[buflen:] + return chunk + fd = self.fd + bytes = recv_func(fd, buflen) + while bytes is None: + try: + trampoline(fd, read=True) + except socket.error, e: + if e[0] == errno.EPIPE: + bytes = '' + else: + raise + else: + bytes = recv_func(fd, buflen) + self.recvcount += len(bytes) + return bytes + return recv + + +def higher_order_send(send_func): + def send(self, data): + count = send_func(self.fd, data) + if not count: + return 0 + self.sendcount += count + return count + return send + + + +class wrapped_fd(object): + newlines = '\r\n' + mode = 'wb+' + is_secure = False + + def __init__(self, fd): + self._closed = False + self.fd = fd + self._fileno = fd.fileno() + self.recvbuffer = '' + self.recvcount = 0 + self.sendcount = 0 + + def getpeername(self, *args, **kw): + fn = self.getpeername = self.fd.getpeername + return fn(*args, **kw) + + def getsockname(self, *args, **kw): + fn = self.getsockname = self.fd.getsockname + return fn(*args, **kw) + + def listen(self, *args, **kw): + fn = self.listen = self.fd.listen + return fn(*args, **kw) + + def bind(self, *args, **kw): + fn = self.bind = self.fd.bind + return fn(*args, **kw) + + def getsockopt(self, *args, **kw): + fn = self.getsockopt = self.fd.getsockopt + return fn(*args, **kw) + + def setsockopt(self, *args, **kw): + fn = self.setsockopt = self.fd.setsockopt + return fn(*args, **kw) + + def connect_ex(self, *args, **kw): + fn = self.connect_ex = self.fd.connect_ex + return fn(*args, **kw) + + def fileno(self, *args, **kw): + fn = self.fileno = self.fd.fileno + return fn(*args, **kw) + + def setblocking(self, *args, **kw): + fn = self.setblocking = self.fd.setblocking + return fn(*args, **kw) + + def close(self, *args, **kw): + if self._closed: + return + self._closed = True + fn = self.close = self.fd.close + try: + res = fn(*args, **kw) + finally: + # This will raise socket.error(32, 'Broken pipe') if there's + # a caller waiting on trampoline (e.g. 
server on .accept()) + get_hub().exc_descriptor(self._fileno) + return res + + def accept(self): + fd = self.fd + while True: + res = util.socket_accept(fd) + if res is not None: + client, addr = res + util.set_nonblocking(client) + return type(self)(client), addr + trampoline(fd, read=True, write=True) + + def connect(self, address): + fd = self.fd + connect = util.socket_connect + while not connect(fd, address): + trampoline(fd, read=True, write=True) + + recv = higher_order_recv(util.socket_recv) + + def recvfrom(self, *args): + trampoline(self.fd, read=True) + return self.fd.recvfrom(*args) + + send = higher_order_send(util.socket_send) + + def sendto(self, *args): + trampoline(self.fd, write=True) + return self.fd.sendto(*args) + + def sendall(self, data): + fd = self.fd + tail = self.send(data) + while tail < len(data): + trampoline(self.fd, write=True) + tail += self.send(data[tail:]) + + def write(self, data): + return self.sendall(data) + + def readuntil(self, terminator, size=None): + buf, self.recvbuffer = self.recvbuffer, '' + checked = 0 + if size is None: + while True: + found = buf.find(terminator, checked) + if found != -1: + found += len(terminator) + chunk, self.recvbuffer = buf[:found], buf[found:] + return chunk + checked = len(buf) + d = self.recv(BUFFER_SIZE) + if not d: + break + buf += d + return buf + while len(buf) < size: + found = buf.find(terminator, checked) + if found != -1: + found += len(terminator) + chunk, self.recvbuffer = buf[:found], buf[found:] + return chunk + checked = len(buf) + d = self.recv(BUFFER_SIZE) + if not d: + break + buf += d + chunk, self.recvbuffer = buf[:size], buf[size:] + + def readline(self, size=None): + return self.readuntil(self.newlines, size=size) + + def __iter__(self): + return self.xreadlines() + + def readlines(self, size=None): + return list(self.xreadlines(size=size)) + + def xreadlines(self, size=None): + if size is None: + while True: + line = self.readline() + if not line: + break + yield line + else: + while size > 0: + line = self.readline(size) + if not line: + break + yield line + size -= len(line) + + def writelines(self, lines): + for line in lines: + self.write(line) + + def read(self, size=None): + if size is not None and not isinstance(size, int): + raise TypeError + buf, self.recvbuffer = self.recvbuffer, '' + lst = [buf] + if size is None: + while True: + d = self.recv(BUFFER_SIZE) + if not d: + break + lst.append(d) + else: + buflen = len(buf) + while buflen < size: + d = self.recv(BUFFER_SIZE) + if not d: + break + buflen += len(d) + lst.append(d) + else: + d = lst[-1] + overbite = buflen - size + if overbite: + lst[-1], self.recvbuffer = d[:-overbite], d[-overbite:] + else: + lst[-1], self.recvbuffer = d, '' + return ''.join(lst) + + def makefile(self, *args, **kw): + return type(self)(self.fd) + + +class wrapped_file(wrapped_fd): + recv = higher_order_recv(util.file_recv) + + send = higher_order_send(util.file_send) + + def flush(self): + self.fd.flush() diff --git a/eventlet/wsgi.py b/eventlet/wsgi.py new file mode 100644 index 0000000..2fa765a --- /dev/null +++ b/eventlet/wsgi.py @@ -0,0 +1,219 @@ +"""\ +@file wsgi.py +@author Bob Ippolito + +Copyright (c) 2005-2006, Bob Ippolito +Copyright (c) 2007, Linden Research, Inc. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import sys +import time +import urllib +import socket +import cStringIO +import SocketServer +import BaseHTTPServer + +from eventlet import api +from eventlet.httpdate import format_date_time + +class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): + + def log_message(self, format, *args): + self.server.log_message("%s - - [%s] %s" % ( + self.address_string(), + self.log_date_time_string(), + format % args)) + + def handle_one_request(self): + self.raw_requestline = self.rfile.readline() + + if not self.raw_requestline: + self.close_connection = 1 + return + + if not self.parse_request(): + return + + self.environ = self.get_environ() + try: + self.handle_one_response() + except socket.error, e: + # Broken pipe, connection reset by peer + if e[0] in (32, 54): + pass + else: + raise + + def handle_one_response(self): + headers_set = [] + headers_sent = [] + # set of lowercase header names that were sent + header_dict = {} + + wfile = self.wfile + num_blocks = None + + def write(data, _write=wfile.write): + if not headers_set: + raise AssertionError("write() before start_response()") + elif not headers_sent: + status, response_headers = headers_set + headers_sent.append(1) + for k, v in response_headers: + header_dict[k.lower()] = k + _write('HTTP/1.0 %s\r\n' % status) + # send Date header? + if 'date' not in header_dict: + _write('Date: %s\r\n' % (format_date_time(time.time()),)) + if 'content-length' not in header_dict and num_blocks == 1: + _write('Content-Length: %s\r\n' % (len(data),)) + for header in response_headers: + _write('%s: %s\r\n' % header) + _write('\r\n') + _write(data) + + def start_request(status, response_headers, exc_info=None): + if exc_info: + try: + if headers_sent: + # Re-raise original exception if headers sent + raise exc_info[0], exc_info[1], exc_info[2] + finally: + # Avoid dangling circular ref + exc_info = None + elif headers_set: + raise AssertionError("Headers already set!") + + headers_set[:] = [status, response_headers] + return write + + result = self.server.app(self.environ, start_request) + try: + num_blocks = len(result) + except (TypeError, AttributeError, NotImplementedError): + pass + + try: + for data in result: + if data: + write(data) + if not headers_sent: + write('') + finally: + if hasattr(result, 'close'): + result.close() + + def get_environ(self): + env = self.server.get_environ() + env['REQUEST_METHOD'] = self.command + env['SCRIPT_NAME'] = '' + + if '?' 
in self.path: + path, query = self.path.split('?', 1) + else: + path, query = self.path, '' + env['PATH_INFO'] = urllib.unquote(path) + env['QUERY_STRING'] = query + + if self.headers.typeheader is None: + env['CONTENT_TYPE'] = self.headers.type + else: + env['CONTENT_TYPE'] = self.headers.typeheader + + length = self.headers.getheader('content-length') + if length: + env['CONTENT_LENGTH'] = length + env['SERVER_PROTOCOL'] = 'HTTP/1.0' + + host, port = self.request.getsockname() + env['SERVER_NAME'] = host + env['SERVER_PORT'] = str(port) + env['REMOTE_ADDR'] = self.client_address[0] + env['GATEWAY_INTERFACE'] = 'CGI/1.1' + + for h in self.headers.headers: + k, v = h.split(':', 1) + k = k.replace('-', '_').upper() + v = v.strip() + if k in env: + continue + envk = 'HTTP_' + k + if envk in env: + env[envk] += ',' + v + else: + env[envk] = v + + return env + + def finish(self): + # Override SocketServer.StreamRequestHandler.finish because + # we only need to call close on the socket, not the makefile'd things + + self.request.close() + + +class Server(BaseHTTPServer.HTTPServer): + def __init__(self, socket, address, app, log, environ=None): + self.socket = socket + self.address = address + if log: + self.log = log + log.write = log.info + else: + self.log = sys.stderr + self.app = app + self.environ = environ + + def get_environ(self): + socket = self.socket + d = { + 'wsgi.input': socket, + 'wsgi.errors': sys.stderr, + 'wsgi.version': (1, 0), + 'wsgi.multithread': True, + 'wsgi.multiprocess': False, + 'wsgi.run_once': False, + 'wsgi.url_scheme': 'http', + } + if self.environ is not None: + d.update(self.environ) + return d + + def process_request(self, (socket, address)): + proto = HttpProtocol(socket, address, self) + + def log_message(self, message): + self.log.write(message + '\n') + + +def server(socket, site, log=None, environ=None): + serv = Server(socket, socket.getsockname(), site, log, environ=None) + try: + print "wsgi starting up on", socket.getsockname() + while True: + try: + api.spawn(serv.process_request, socket.accept()) + except KeyboardInterrupt: + api.get_hub().remove_descriptor(socket.fileno()) + print "wsgi exiting" + break + finally: + socket.close() diff --git a/examples/echoserver.py b/examples/echoserver.py new file mode 100644 index 0000000..3fe0191 --- /dev/null +++ b/examples/echoserver.py @@ -0,0 +1,52 @@ +"""\ +@file echoserver.py + +Simple server that listens on port 6000 and echos back every input to +the client. To try out the server, start it up by running this file. + +Connect to it with: + telnet localhost 6000 + +You terminate your connection by terminating telnet (typically Ctrl-] +and then 'quit') + +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +from eventlet import api + +def handle_socket(client): + print "client connected" + while True: + # pass through every non-eof line + x = client.readline() + if not x: break + client.write(x) + print "echoed", x + print "client disconnected" + +# server socket listening on port 6000 +server = api.tcp_listener(('0.0.0.0', 6000)) +while True: + new_sock, address = server.accept() + # handle every new connection with a new coroutine + api.spawn(handle_socket, new_sock) + +server.close() diff --git a/examples/webcrawler.py b/examples/webcrawler.py new file mode 100644 index 0000000..27c9c40 --- /dev/null +++ b/examples/webcrawler.py @@ -0,0 +1,55 @@ +"""\ +@file webcrawler.py + +This is a simple web "crawler" that fetches a bunch of urls using a coroutine pool. It fetches as + many urls at time as coroutines in the pool. + +Copyright (c) 2007, Linden Research, Inc. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +urls = ["http://www.google.com/intl/en_ALL/images/logo.gif", + "http://wiki.secondlife.com/w/images/secondlife.jpg", + "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif"] + +import time +from eventlet import coros, httpc, util + +# replace socket with a cooperative coroutine socket because httpc +# uses httplib, which uses socket. Removing this serializes the http +# requests, because the standard socket is blocking. 
+util.wrap_socket_with_coroutine_socket() + +def fetch(url): + # we could do something interesting with the result, but this is + # example code, so we'll just report that we did it + print "%s fetching %s" % (time.asctime(), url) + httpc.get(url) + print "%s fetched %s" % (time.asctime(), url) + +pool = coros.CoroutinePool(max_size=4) +waiters = [] +for url in urls: + waiters.append(pool.execute(fetch, url)) + +# wait for all the coroutines to come back before exiting the process +for waiter in waiters: + waiter.wait() + + From 711163bd414aed697c93231d4ac52ab3a640df6a Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Thu, 23 Aug 2007 20:00:20 -0400 Subject: [PATCH 03/79] [svn r4] httpc is not executable --- eventlet/httpc.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 eventlet/httpc.py diff --git a/eventlet/httpc.py b/eventlet/httpc.py old mode 100755 new mode 100644 From 5cba47bf4a9d46e812b99528b292624e3dc01324 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Thu, 23 Aug 2007 21:02:06 -0400 Subject: [PATCH 04/79] [svn r5] Removed unused api, twiddled README. --- README | 4 ++-- eventlet/httpd.py | 9 ++------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/README b/README index 3bf8701..928b161 100644 --- a/README +++ b/README @@ -21,8 +21,8 @@ Eventlet runs on Python version 2.3 or greater, with the following dependenceis: == getting started == % python ->>> import eventlet.api ->>> help(eventlet.api) +>>> from eventlet import api +>>> help(api) Also, look at the examples in the examples directory. diff --git a/eventlet/httpd.py b/eventlet/httpd.py index e6904db..1bc38f6 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -266,13 +266,8 @@ class Request(object): environ = dict( REQUEST_METHOD='POST', QUERY_STRING=self._query or '') - if (hasattr(self, 'resource') and - hasattr(self.resource, 'getFieldStorage')): - self._field_storage = self.resource.getFieldStorage( - self, fl, headers, environ) - else: - self._field_storage = cgi.FieldStorage( - fl, headers, environ=environ) + + self._field_storage = cgi.FieldStorage(fl, headers, environ=environ) return self._field_storage From ba64483b7b4f402ecfd7d30d61ba09c1d0d8908a Mon Sep 17 00:00:00 2001 From: "donovan.linden" Date: Fri, 5 Oct 2007 15:21:09 -0400 Subject: [PATCH 05/79] [svn r15] add NotModified to eventlet.httpc make it safe for PUT to return no body in httpc change over httpc.make_suite from using closures to using an instance of HttpSuite add HEAD support to httpc add HEAD support to jsonhttp add a setup.py All this development was support for my development of mulib.pantheon, checkin coming next. 
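
A rough usage sketch of the reworked client interface (the URL and the
If-Modified-Since value below are hypothetical, for illustration only).
The module-level helpers are bound methods of a default
HttpSuite(str, None, 'text/plain') instance, and jsonhttp now exposes the
same names backed by simplejson:

    from eventlet import httpc, jsonhttp

    url = 'http://example.com/some/resource'   # hypothetical

    httpc.head(url)              # new HEAD support; error statuses still raise
    data = jsonhttp.get(url)     # response body parsed with simplejson.loads

    try:
        httpc.get(url, headers={'if-modified-since':
                                'Sat, 01 Jan 2000 00:00:00 GMT'})
    except httpc.NotModified:
        pass                     # 304 responses now map to their own exception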
--- eventlet/httpc.py | 58 +++++++++++++++++++++++++++++++++----------- eventlet/httpd.py | 5 +++- eventlet/jsonhttp.py | 5 ++-- setup.py | 13 ++++++++++ 4 files changed, 64 insertions(+), 17 deletions(-) create mode 100644 setup.py diff --git a/eventlet/httpc.py b/eventlet/httpc.py index af6f40f..1922a41 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -31,7 +31,7 @@ import time import urlparse -from mx.DateTime import Parser +#from mx.DateTime import Parser _old_HTTPConnection = httplib.HTTPConnection @@ -212,12 +212,17 @@ class Gone(ConnectionError): pass +class NotModified(ConnectionError): + pass + + status_to_error_map = { 500: InternalServerError, 410: Gone, 404: NotFound, 403: Forbidden, 202: Accepted, + 304: NotModified, } scheme_to_factory_map = { @@ -294,28 +299,48 @@ def request(connection, method, url, body='', headers=None, dumper=None, loader= return body -def make_suite(dumper, loader, fallback_content_type): - def get(url, headers=None, use_proxy=False, verbose=False, ok=None): +def make_safe_loader(loader): + def safe_loader(what): + try: + return loader(what) + except Exception, e: + return None + return safe_loader + + +class HttpSuite(object): + def __init__(self, dumper, loader, fallback_content_type): + self.dumper = dumper + self.loader = loader + self.fallback_content_type = fallback_content_type + + def head(self, url, headers=None, use_proxy=False, verbose=False, ok=None): + if headers is None: + headers = {} + connection = connect(url) + return request(connection, 'HEAD', url, '', headers, None, None, use_proxy, verbose, ok) + + def get(self, url, headers=None, use_proxy=False, verbose=False, ok=None): #import pdb; pdb.Pdb().set_trace() if headers is None: headers = {} connection = connect(url) - return request(connection, 'GET', url, '', headers, None, loader, use_proxy, verbose, ok) + return request(connection, 'GET', url, '', headers, None, self.loader, use_proxy, verbose, ok) - def put(url, data, headers=None, content_type=None, verbose=False, ok=None): + def put(self, url, data, headers=None, content_type=None, verbose=False, ok=None): if headers is None: headers = {} if content_type is not None: headers['content-type'] = content_type else: - headers['content-type'] = fallback_content_type + headers['content-type'] = self.fallback_content_type connection = connect(url) - return request(connection, 'PUT', url, data, headers, dumper, loader, verbose=verbose, ok=ok) + return request(connection, 'PUT', url, data, headers, self.dumper, make_safe_loader(self.loader), verbose=verbose, ok=ok) - def delete(url, verbose=False, ok=None): + def delete(self, url, verbose=False, ok=None): return request(connect(url), 'DELETE', url, verbose=verbose, ok=ok) - def post(url, data='', headers=None, content_type=None, verbose=False, ok=None): + def post(self, url, data='', headers=None, content_type=None, verbose=False, ok=None): connection = connect(url) if headers is None: headers = {} @@ -323,12 +348,17 @@ def make_suite(dumper, loader, fallback_content_type): if content_type is not None: headers['content-type'] = content_type else: - headers['content-type'] = fallback_content_type - return request(connect(url), 'POST', url, data, headers, dumper, loader, verbose=verbose, ok=ok) - - return get, put, delete, post + headers['content-type'] = self.fallback_content_type + return request(connect(url), 'POST', url, data, headers, self.dumper, self.loader, verbose=verbose, ok=ok) -get, put, delete, post = make_suite(str, None, 'text/plain') +def make_suite(dumper, loader, 
fallback_content_type): + suite = HttpSuite(dumper, loader, fallback_content_type) + return suite.get, suite.put, suite.delete, suite.post + + +suite = HttpSuite(str, None, 'text/plain') +head, get, put, delete, post = ( + suite.head, suite.get, suite.put, suite.delete, suite.post) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index 1bc38f6..8d078b5 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -416,7 +416,10 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): raise NotImplementedError("Handler failed to write response to request: %s" % request) if not hasattr(self, '_cached_body'): - request.read_body() ## read & discard body + try: + request.read_body() ## read & discard body + except: + pass continue except socket.error, e: # Broken pipe, connection reset by peer diff --git a/eventlet/jsonhttp.py b/eventlet/jsonhttp.py index 31610fb..3a22d0e 100644 --- a/eventlet/jsonhttp.py +++ b/eventlet/jsonhttp.py @@ -26,6 +26,7 @@ from eventlet import httpc import simplejson +suite = httpc.HttpSuite(simplejson.dumps, simplejson.loads, 'application/json') +head, get, put, delete, post = ( + suite.head, suite.get, suite.put, suite.delete, suite.post) -get, put, delete, post = httpc.make_suite( - simplejson.dumps, simplejson.loads, 'application/json') diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..8e03fe2 --- /dev/null +++ b/setup.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python + +from distutils.core import setup + +setup( + name='eventlet', + version='0.1', + description='Coroutine-based networking library', + author='Linden Lab', + author_email='sldev@lists.secondlife.com', + url='http://wiki.secondlife.com/wiki/Eventlet', + packages=['eventlet']) + From ef1afb440736d8e179245f2f365ad697199dbd25 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 8 Oct 2007 03:12:35 -0400 Subject: [PATCH 06/79] [svn r16] svn merge -r8:14 https://svn.secondlife.com/svn/eventlet/branches/beta-1 into https://svn.secondlife.com/svn/eventlet/trunk --- eventlet/api.py | 9 +++-- eventlet/httpc.py | 80 +++++++++++++++++++++++++++++++------------ eventlet/httpd.py | 6 ++-- eventlet/processes.py | 3 ++ eventlet/selecthub.py | 4 +-- 5 files changed, 73 insertions(+), 29 deletions(-) diff --git a/eventlet/api.py b/eventlet/api.py index b2e3c53..18bb4fb 100644 --- a/eventlet/api.py +++ b/eventlet/api.py @@ -38,9 +38,12 @@ except ImportError: pylibsupport.emulate() greenlet = sys.modules['greenlet'] except ImportError: - import stacklesssupport - stacklesssupport.emulate() - greenlet = sys.modules['greenlet'] + try: + import stacklesssupport + stacklesssupport.emulate() + greenlet = sys.modules['greenlet'] + except ImportError: + raise ImportError("Unable to find an implementation of greenlet.") from eventlet import greenlib, tls diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 1922a41..545919c 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -50,7 +50,6 @@ def host_and_port_from_url(url): """ host = None port = None - #print url parsed_url = urlparse.urlparse(url) try: host, port = parsed_url[1].split(':') @@ -81,19 +80,28 @@ class HttpClient(httplib.HTTPConnection): old_putrequest = httplib.HTTPConnection.putrequest putrequest = better_putrequest - -def wrap_httplib_with_httpc(): - httplib.HTTP._connection_class = httplib.HTTPConnection = HttpClient - httplib.HTTPS._connection_class = httplib.HTTPSConnection = HttpsClient - - class HttpsClient(httplib.HTTPSConnection): + """A subclass of httplib.HTTPSConnection which works around a bug + in the interaction between 
eventlet sockets and httplib. httplib relies + on gc to close the socket, causing the socket to be closed too early. + + This is an awful hack and the bug should be fixed properly ASAP. + """ def close(self): pass old_putrequest = httplib.HTTPSConnection.putrequest putrequest = better_putrequest +def wrap_httplib_with_httpc(): + """Replace httplib's implementations of these classes with our enhanced ones. + + Needed to work around code that uses httplib directly.""" + httplib.HTTP._connection_class = httplib.HTTPConnection = HttpClient + httplib.HTTPS._connection_class = httplib.HTTPSConnection = HttpsClient + + + class FileScheme(object): """Retarded scheme to local file wrapper.""" host = '' @@ -161,6 +169,10 @@ class FileScheme(object): class ConnectionError(Exception): + """Detailed exception class for reporting on http connection problems. + + There are lots of subclasses so you can use closely-specified + exception clauses.""" def __init__(self, method, host, port, path, status, reason, body): self.method = method self.host = host @@ -180,6 +192,7 @@ class ConnectionError(Exception): class UnparseableResponse(ConnectionError): + """Raised when a loader cannot parse the response from the server.""" def __init__(self, content_type, response): self.content_type = content_type self.response = response @@ -193,22 +206,27 @@ class UnparseableResponse(ConnectionError): class Accepted(ConnectionError): + """ 202 Accepted """ pass class NotFound(ConnectionError): + """ 404 Not Found """ pass class Forbidden(ConnectionError): + """ 403 Forbidden """ pass class InternalServerError(ConnectionError): + """ 500 Internal Server Error """ pass class Gone(ConnectionError): + """ 410 Gone """ pass @@ -233,6 +251,12 @@ scheme_to_factory_map = { def make_connection(scheme, location, use_proxy): + """ Create a connection object to a host:port. + + @param scheme Protocol, scheme, whatever you want to call it. http, file, https are currently supported. + @param location Hostname and port number, formatted as host:port or http://host:port if you're so inclined. + @param use_proxy Connect to a proxy instead of the actual location. Uses environment variables to decide where the proxy actually lives. + """ if use_proxy: if "http_proxy" in os.environ: location = os.environ["http_proxy"] @@ -241,7 +265,7 @@ def make_connection(scheme, location, use_proxy): else: location = "localhost:3128" #default to local squid - # run a little heuristic to see if it's an url, and if so parse out the hostpart + # run a little heuristic to see if location is an url, and if so parse out the hostpart if location.startswith('http'): _scheme, location, path, parameters, query, fragment = urlparse.urlparse(location) @@ -251,11 +275,25 @@ def make_connection(scheme, location, use_proxy): def connect(url, use_proxy=False): + """ Create a connection object to the host specified in a url. Convenience function for make_connection.""" scheme, location, path, params, query, id = urlparse.urlparse(url) return make_connection(scheme, location, use_proxy) def request(connection, method, url, body='', headers=None, dumper=None, loader=None, use_proxy=False, verbose=False, ok=None): + """Make an http request to a url, for internal use mostly. + + @param connection The connection (as returned by make_connection) to use for the request. + @param method HTTP method + @param url Full url to make request on. + @param body HTTP body, if necessary for the method. Can be any object, assuming an appropriate dumper is also provided. 
+ @param headers Dict of header name to header value + @param dumper Method that formats the body as a string. + @param loader Method that converts the response body into an object. + @param use_proxy Set to True if the connection is to a proxy. + @param verbose Set to true to change the return value of the function to: status, status_message, body + @param ok Set of valid response statuses. If the returned status is not in this list, an exception is thrown. + """ if ok is None: ok = (200, 201, 204) if headers is None: @@ -271,10 +309,12 @@ def request(connection, method, url, body='', headers=None, dumper=None, loader= if scheme == 'file': use_proxy = False - - if dumper is not None: - body = dumper(body) - headers['content-length'] = len(body) + if method in ('PUT', 'POST'): + if dumper is not None: + body = dumper(body) + # don't set content-length header because httplib does it for us in _send_request + else: + body = '' connection.request(method, url, body, headers) response = connection.getresponse() @@ -286,13 +326,11 @@ def request(connection, method, url, body='', headers=None, dumper=None, loader= body = response.read() - if loader is None: - return body - - try: - body = loader(body) - except Exception, e: - raise UnparseableResponse(loader, body) + if loader is not None: + try: + body = loader(body) + except Exception, e: + raise UnparseableResponse(loader, body) if verbose: return response.status, response.msg, body @@ -324,7 +362,7 @@ class HttpSuite(object): #import pdb; pdb.Pdb().set_trace() if headers is None: headers = {} - connection = connect(url) + connection = connect(url, use_proxy) return request(connection, 'GET', url, '', headers, None, self.loader, use_proxy, verbose, ok) def put(self, url, data, headers=None, content_type=None, verbose=False, ok=None): @@ -341,7 +379,6 @@ class HttpSuite(object): return request(connect(url), 'DELETE', url, verbose=verbose, ok=ok) def post(self, url, data='', headers=None, content_type=None, verbose=False, ok=None): - connection = connect(url) if headers is None: headers = {} if 'content-type' not in headers: @@ -353,6 +390,7 @@ class HttpSuite(object): def make_suite(dumper, loader, fallback_content_type): + """ Return a tuple of methods for making http requests with automatic bidirectional formatting with a particular content-type.""" suite = HttpSuite(dumper, loader, fallback_content_type) return suite.get, suite.put, suite.delete, suite.post diff --git a/eventlet/httpd.py b/eventlet/httpd.py index 8d078b5..61cfff9 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -295,7 +295,7 @@ class Request(object): typ, val, tb = sys.exc_info() body = dict(type=str(typ), error=True, reason=str(val)) self.response(response) - if type(body) is str: + if(type(body) is str and not self.response_written()): self.write(body) return try: @@ -450,7 +450,7 @@ class Server(BaseHTTPServer.HTTPServer): self.log = self def write(self, something): - sys.stdout.write('%s\n' % (something, )) + sys.stdout.write('%s' % (something, )) def log_message(self, message): self.log.write(message) @@ -463,7 +463,7 @@ class Server(BaseHTTPServer.HTTPServer): client_address, date_time, requestline, code, size, request_time """ self.log.write( - '%s - - [%s] "%s" %s %s %.6f' % args) + '%s - - [%s] "%s" %s %s %.6f\n' % args) def server(sock, site, log=None, max_size=512): diff --git a/eventlet/processes.py b/eventlet/processes.py index 8c9aea0..c51d1e7 100644 --- a/eventlet/processes.py +++ b/eventlet/processes.py @@ -42,6 +42,9 @@ class Process(object): 
self.command = command self.args = args self._dead_callback = dead_callback + self.run() + + def run(self): self.dead = False self.started = False self.popen4 = None diff --git a/eventlet/selecthub.py b/eventlet/selecthub.py index e9b1e41..60e2bb6 100644 --- a/eventlet/selecthub.py +++ b/eventlet/selecthub.py @@ -86,7 +86,7 @@ class Hub(object): pass if exc is not None: try: - exc() + exc(fileno) except self.runloop.SYSTEM_EXCEPTIONS: self.squelch_exception(fileno, sys.exc_info()) @@ -157,7 +157,7 @@ class Hub(object): writers = self.writers excs = self.excs try: - r, w, ig = select.select(readers, writers, [], seconds) + r, w, ig = select.select(readers.keys(), writers.keys(), [], seconds) except select.error, e: if e.args[0] == errno.EINTR: return From 46d292efe6a36467752886940389566d7cf01236 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 15 Oct 2007 18:32:30 -0400 Subject: [PATCH 07/79] [svn r19] Refactored httpc a bit to make it easier to extend. One of the API changes is that it now accepts arbitrary keyword arguments, which are then passed on to all the implementation methods, in case your extension involves, say, an object that gets passed around. We're basically moving more and more stuff into the HttpSuite object. --- eventlet/httpc.py | 141 +++++++++++++++++++++++----------------------- 1 file changed, 72 insertions(+), 69 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 545919c..e7ecbbf 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -280,63 +280,6 @@ def connect(url, use_proxy=False): return make_connection(scheme, location, use_proxy) -def request(connection, method, url, body='', headers=None, dumper=None, loader=None, use_proxy=False, verbose=False, ok=None): - """Make an http request to a url, for internal use mostly. - - @param connection The connection (as returned by make_connection) to use for the request. - @param method HTTP method - @param url Full url to make request on. - @param body HTTP body, if necessary for the method. Can be any object, assuming an appropriate dumper is also provided. - @param headers Dict of header name to header value - @param dumper Method that formats the body as a string. - @param loader Method that converts the response body into an object. - @param use_proxy Set to True if the connection is to a proxy. - @param verbose Set to true to change the return value of the function to: status, status_message, body - @param ok Set of valid response statuses. If the returned status is not in this list, an exception is thrown. - """ - if ok is None: - ok = (200, 201, 204) - if headers is None: - headers = {} - if not use_proxy: - scheme, location, path, params, query, id = urlparse.urlparse(url) - url = path - if query: - url += "?" 
+ query - else: - scheme, location, path, params, query, id = urlparse.urlparse(url) - headers.update({ "host" : location }) - if scheme == 'file': - use_proxy = False - - if method in ('PUT', 'POST'): - if dumper is not None: - body = dumper(body) - # don't set content-length header because httplib does it for us in _send_request - else: - body = '' - - connection.request(method, url, body, headers) - response = connection.getresponse() - if (response.status not in ok): - klass = status_to_error_map.get(response.status, ConnectionError) - raise klass( - connection.method, connection.host, connection.port, - connection.path, response.status, response.reason, response.read()) - - body = response.read() - - if loader is not None: - try: - body = loader(body) - except Exception, e: - raise UnparseableResponse(loader, body) - - if verbose: - return response.status, response.msg, body - return body - - def make_safe_loader(loader): def safe_loader(what): try: @@ -352,20 +295,80 @@ class HttpSuite(object): self.loader = loader self.fallback_content_type = fallback_content_type - def head(self, url, headers=None, use_proxy=False, verbose=False, ok=None): + def request(self, connection, method, url, body='', headers=None, dumper=None, loader=None, use_proxy=False, verbose=False, ok=None, **kwargs): + """Make an http request to a url, for internal use mostly. + + @param connection The connection (as returned by make_connection) to use for the request. + @param method HTTP method + @param url Full url to make request on. + @param body HTTP body, if necessary for the method. Can be any object, assuming an appropriate dumper is also provided. + @param headers Dict of header name to header value + @param dumper Method that formats the body as a string. + @param loader Method that converts the response body into an object. + @param use_proxy Set to True if the connection is to a proxy. + @param verbose Set to true to change the return value of the function to: status, status_message, body + @param ok Set of valid response statuses. If the returned status is not in this list, an exception is thrown. + """ + if ok is None: + ok = (200, 201, 204) + if headers is None: + headers = {} + if not use_proxy: + scheme, location, path, params, query, id = urlparse.urlparse(url) + url = path + if query: + url += "?" 
+ query + else: + scheme, location, path, params, query, id = urlparse.urlparse(url) + headers.update({ "host" : location }) + if scheme == 'file': + use_proxy = False + + if method in ('PUT', 'POST'): + if dumper is not None: + body = dumper(body) + # don't set content-length header because httplib does it for us in _send_request + else: + body = '' + + response, body = self._get_response_body(connection, method, url, body, headers, ok, **kwargs) + + if loader is not None: + try: + body = loader(body) + except Exception, e: + raise UnparseableResponse(loader, body) + + if verbose: + return response.status, response.msg, body + return body + + def _get_response_body(self, connection, method, url, body, headers, ok, **kwargs): + connection.request(method, url, body, headers) + response = connection.getresponse() + if (response.status not in ok): + klass = status_to_error_map.get(response.status, ConnectionError) + raise klass( + connection.method, connection.host, connection.port, + connection.path, response.status, response.reason, response.read()) + + body = response.read() + return response, body + + def head(self, url, headers=None, use_proxy=False, verbose=False, ok=None, **kwargs): if headers is None: headers = {} connection = connect(url) - return request(connection, 'HEAD', url, '', headers, None, None, use_proxy, verbose, ok) + return self.request(connection, 'HEAD', url, '', headers, None, None, use_proxy, verbose, ok, **kwargs) - def get(self, url, headers=None, use_proxy=False, verbose=False, ok=None): + def get(self, url, headers=None, use_proxy=False, verbose=False, ok=None, **kwargs): #import pdb; pdb.Pdb().set_trace() if headers is None: headers = {} connection = connect(url, use_proxy) - return request(connection, 'GET', url, '', headers, None, self.loader, use_proxy, verbose, ok) + return self.request(connection, 'GET', url, '', headers, None, self.loader, use_proxy, verbose, ok, **kwargs) - def put(self, url, data, headers=None, content_type=None, verbose=False, ok=None): + def put(self, url, data, headers=None, content_type=None, verbose=False, ok=None, **kwargs): if headers is None: headers = {} if content_type is not None: @@ -373,12 +376,12 @@ class HttpSuite(object): else: headers['content-type'] = self.fallback_content_type connection = connect(url) - return request(connection, 'PUT', url, data, headers, self.dumper, make_safe_loader(self.loader), verbose=verbose, ok=ok) + return self.request(connection, 'PUT', url, data, headers, self.dumper, make_safe_loader(self.loader), verbose=verbose, ok=ok, **kwargs) - def delete(self, url, verbose=False, ok=None): - return request(connect(url), 'DELETE', url, verbose=verbose, ok=ok) + def delete(self, url, verbose=False, ok=None, **kwargs): + return request(connect(url), 'DELETE', url, verbose=verbose, ok=ok, **kwargs) - def post(self, url, data='', headers=None, content_type=None, verbose=False, ok=None): + def post(self, url, data='', headers=None, content_type=None, verbose=False, ok=None, **kwargs): if headers is None: headers = {} if 'content-type' not in headers: @@ -386,7 +389,7 @@ class HttpSuite(object): headers['content-type'] = content_type else: headers['content-type'] = self.fallback_content_type - return request(connect(url), 'POST', url, data, headers, self.dumper, self.loader, verbose=verbose, ok=ok) + return self.request(connect(url), 'POST', url, data, headers, self.dumper, self.loader, verbose=verbose, ok=ok, **kwargs) def make_suite(dumper, loader, fallback_content_type): @@ -396,7 +399,7 @@ def 
make_suite(dumper, loader, fallback_content_type): suite = HttpSuite(str, None, 'text/plain') -head, get, put, delete, post = ( - suite.head, suite.get, suite.put, suite.delete, suite.post) +head, get, put, delete, post, request = ( + suite.head, suite.get, suite.put, suite.delete, suite.post, suite.request) From 21c953273bd3b444060ebbb33d7846d198843dae Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Wed, 17 Oct 2007 17:50:21 -0400 Subject: [PATCH 08/79] [svn r20] Refactor the httpc API to return more information. Reviewed by Ryan. --- eventlet/httpc.py | 183 +++++++++++++++++++++++++--------------------- 1 file changed, 99 insertions(+), 84 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index e7ecbbf..ea4cd32 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -31,7 +31,7 @@ import time import urlparse -#from mx.DateTime import Parser +from mx.DateTime import Parser _old_HTTPConnection = httplib.HTTPConnection @@ -113,6 +113,7 @@ class FileScheme(object): def request(self, method, fullpath, body='', headers=None): self.status = 200 + self.msg = '' self.path = fullpath.split('?')[0] self.method = method = method.lower() assert method in ('get', 'put', 'delete') @@ -280,6 +281,62 @@ def connect(url, use_proxy=False): return make_connection(scheme, location, use_proxy) +def request_(connection, method, url, body='', headers=None, dumper=None, loader=None, use_proxy=False, ok=None): + """Make an http request to a url, for internal use mostly. + + @param connection The connection (as returned by make_connection) to use for the request. + @param method HTTP method + @param url Full url to make request on. + @param body HTTP body, if necessary for the method. Can be any object, assuming an appropriate dumper is also provided. + @param headers Dict of header name to header value + @param dumper Method that formats the body as a string. + @param loader Method that converts the response body into an object. + @param use_proxy Set to True if the connection is to a proxy. + @param ok Set of valid response statuses. If the returned status is not in this list, an exception is thrown. + """ + if ok is None: + ok = (200, 201, 204) + if headers is None: + headers = {} + if not use_proxy: + scheme, location, path, params, query, id = urlparse.urlparse(url) + url = path + if query: + url += "?" 
+ query + else: + scheme, location, path, params, query, id = urlparse.urlparse(url) + headers.update({ "host" : location }) + if scheme == 'file': + use_proxy = False + + if method in ('PUT', 'POST'): + if dumper is not None: + body = dumper(body) + # don't set content-length header because httplib does it for us in _send_request + else: + body = '' + + connection.request(method, url, body, headers) + response = connection.getresponse() + if (response.status not in ok): + klass = status_to_error_map.get(response.status, ConnectionError) + raise klass( + connection.method, connection.host, connection.port, + connection.path, response.status, response.reason, response.read()) + + body = response.read() + + if loader is not None: + try: + body = loader(body) + except Exception, e: + raise UnparseableResponse(loader, body) + + return response.status, response.msg, body + +def request(*args, **kwargs): + return request_(*args, **kwargs)[-1] + def make_safe_loader(loader): def safe_loader(what): try: @@ -295,101 +352,59 @@ class HttpSuite(object): self.loader = loader self.fallback_content_type = fallback_content_type - def request(self, connection, method, url, body='', headers=None, dumper=None, loader=None, use_proxy=False, verbose=False, ok=None, **kwargs): - """Make an http request to a url, for internal use mostly. + def head_(self, url, headers=None, use_proxy=False, ok=None): + return request_(connect(url, use_proxy), method='HEAD', url=url, + body='', headers=headers, use_proxy=use_proxy, + ok=ok) - @param connection The connection (as returned by make_connection) to use for the request. - @param method HTTP method - @param url Full url to make request on. - @param body HTTP body, if necessary for the method. Can be any object, assuming an appropriate dumper is also provided. - @param headers Dict of header name to header value - @param dumper Method that formats the body as a string. - @param loader Method that converts the response body into an object. - @param use_proxy Set to True if the connection is to a proxy. - @param verbose Set to true to change the return value of the function to: status, status_message, body - @param ok Set of valid response statuses. If the returned status is not in this list, an exception is thrown. - """ - if ok is None: - ok = (200, 201, 204) - if headers is None: - headers = {} - if not use_proxy: - scheme, location, path, params, query, id = urlparse.urlparse(url) - url = path - if query: - url += "?" 
+ query - else: - scheme, location, path, params, query, id = urlparse.urlparse(url) - headers.update({ "host" : location }) - if scheme == 'file': - use_proxy = False + def head(self, *args, **kwargs): + return self.head_(*args, **kwargs)[-1] - if method in ('PUT', 'POST'): - if dumper is not None: - body = dumper(body) - # don't set content-length header because httplib does it for us in _send_request - else: - body = '' - - response, body = self._get_response_body(connection, method, url, body, headers, ok, **kwargs) - - if loader is not None: - try: - body = loader(body) - except Exception, e: - raise UnparseableResponse(loader, body) - - if verbose: - return response.status, response.msg, body - return body - - def _get_response_body(self, connection, method, url, body, headers, ok, **kwargs): - connection.request(method, url, body, headers) - response = connection.getresponse() - if (response.status not in ok): - klass = status_to_error_map.get(response.status, ConnectionError) - raise klass( - connection.method, connection.host, connection.port, - connection.path, response.status, response.reason, response.read()) - - body = response.read() - return response, body - - def head(self, url, headers=None, use_proxy=False, verbose=False, ok=None, **kwargs): - if headers is None: - headers = {} - connection = connect(url) - return self.request(connection, 'HEAD', url, '', headers, None, None, use_proxy, verbose, ok, **kwargs) - - def get(self, url, headers=None, use_proxy=False, verbose=False, ok=None, **kwargs): + def get_(self, url, headers=None, use_proxy=False, ok=None): #import pdb; pdb.Pdb().set_trace() if headers is None: headers = {} - connection = connect(url, use_proxy) - return self.request(connection, 'GET', url, '', headers, None, self.loader, use_proxy, verbose, ok, **kwargs) + return request_(connect(url, use_proxy), method='GET', url=url, + body='', headers=headers, loader=self.loader, + use_proxy=use_proxy, ok=ok) - def put(self, url, data, headers=None, content_type=None, verbose=False, ok=None, **kwargs): + def get(self, *args, **kwargs): + return self.get_(*args, **kwargs)[-1] + + def put_(self, url, data, headers=None, content_type=None, ok=None): if headers is None: headers = {} - if content_type is not None: - headers['content-type'] = content_type - else: + if content_type is None: headers['content-type'] = self.fallback_content_type - connection = connect(url) - return self.request(connection, 'PUT', url, data, headers, self.dumper, make_safe_loader(self.loader), verbose=verbose, ok=ok, **kwargs) + else: + headers['content-type'] = content_type + return request_(connect(url), method='PUT', url=url, body=data, + headers=headers, dumper=self.dumper, + loader=make_safe_loader(self.loader), ok=ok) - def delete(self, url, verbose=False, ok=None, **kwargs): - return request(connect(url), 'DELETE', url, verbose=verbose, ok=ok, **kwargs) + def put(self, *args, **kwargs): + return self.put_(*args, **kwargs)[-1] - def post(self, url, data='', headers=None, content_type=None, verbose=False, ok=None, **kwargs): + def delete_(self, url, ok=None): + return request_(connect(url), method='DELETE', url=url, ok=ok) + + def delete(self, *args, **kwargs): + return self.delete_(*args, **kwargs)[-1] + + def post_(self, url, data='', headers=None, content_type=None, ok=None): if headers is None: headers = {} - if 'content-type' not in headers: - if content_type is not None: - headers['content-type'] = content_type - else: + if 'content-type' in headers: + if content_type is None: 
headers['content-type'] = self.fallback_content_type - return self.request(connect(url), 'POST', url, data, headers, self.dumper, self.loader, verbose=verbose, ok=ok, **kwargs) + else: + headers['content-type'] = content_type + return request_(connect(url), method='POST', url=url, body=data, + headers=headers, dumper=self.dumper, + loader=self.loader, ok=ok) + + def post(self, *args, **kwargs): + return self.post_(*args, **kwargs)[-1] def make_suite(dumper, loader, fallback_content_type): @@ -399,7 +414,7 @@ def make_suite(dumper, loader, fallback_content_type): suite = HttpSuite(str, None, 'text/plain') -head, get, put, delete, post, request = ( - suite.head, suite.get, suite.put, suite.delete, suite.post, suite.request) +head, get, put, delete, post = ( + suite.head, suite.get, suite.put, suite.delete, suite.post) From c2bb3afd9379586c876a3813582f45109174ea81 Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Wed, 17 Oct 2007 20:01:56 -0400 Subject: [PATCH 09/79] [svn r21] Make httpd_test look for apachebench more carefully. Reviewed by Ryan. --- eventlet/httpd_test.py | 4 +++- eventlet/tests.py | 10 ++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/eventlet/httpd_test.py b/eventlet/httpd_test.py index 19b2c9d..803e84e 100644 --- a/eventlet/httpd_test.py +++ b/eventlet/httpd_test.py @@ -131,7 +131,9 @@ class TestHttpd(tests.TestCase): def test_005_run_apachebench(self): url = 'http://localhost:12346/' - out = processes.Process('/usr/sbin/ab', ['-c','64','-n','1024', '-k', url]) + # ab is apachebench + out = processes.Process(tests.find_command('ab'), + ['-c','64','-n','1024', '-k', url]) print out.read() diff --git a/eventlet/tests.py b/eventlet/tests.py index 971509a..378fa18 100644 --- a/eventlet/tests.py +++ b/eventlet/tests.py @@ -24,6 +24,8 @@ THE SOFTWARE. """ import atexit +import errno +import os import sys import unittest @@ -34,3 +36,11 @@ TestCase = unittest.TestCase name = getattr(sys.modules['__main__'], '__name__', None) main = unittest.main + + +def find_command(command): + for dir in os.getenv('PATH', '/usr/bin:/usr/sbin').split(os.pathsep): + p = os.path.join(dir, command) + if os.access(p, os.X_OK): + return p + raise IOError(errno.ENOENT, 'Command not found: %r' % command) From dbe4bb08415a07d9e68d5368b2e45de5bd3b8795 Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Wed, 17 Oct 2007 20:24:25 -0400 Subject: [PATCH 10/79] [svn r23] Add initial minimal httpc tests. --- eventlet/httpc.py | 14 ++++++--- eventlet/httpc_test.py | 69 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 4 deletions(-) create mode 100644 eventlet/httpc_test.py diff --git a/eventlet/httpc.py b/eventlet/httpc.py index ea4cd32..f7b8dd9 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -414,7 +414,13 @@ def make_suite(dumper, loader, fallback_content_type): suite = HttpSuite(str, None, 'text/plain') -head, get, put, delete, post = ( - suite.head, suite.get, suite.put, suite.delete, suite.post) - - +delete = suite.delete +delete_ = suite.delete_ +get = suite.get +get_ = suite.get_ +head = suite.head +head_ = suite.head_ +post = suite.post +post_ = suite.post_ +put = suite.put +put_ = suite.put_ diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py new file mode 100644 index 0000000..6126baf --- /dev/null +++ b/eventlet/httpc_test.py @@ -0,0 +1,69 @@ +"""\ +@file httpd_test.py +@author Bryan O'Sullivan + +Copyright (c) 2007, Linden Research, Inc. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +from eventlet import api +from eventlet import httpc +from eventlet import httpd +from eventlet import processes +from eventlet import util + + +util.wrap_socket_with_coroutine_socket() + + +from eventlet import tests + + +class Site(object): + def handle_request(self, req): + req.set_header('x-hello', 'hello') + req.write('hello world') + + def adapt(self, obj, req): + req.write(str(obj)) + + +class TestHttpc(tests.TestCase): + def setUp(self): + self.victim = api.spawn(httpd.server, + api.tcp_listener(('0.0.0.0', 31337)), + Site(), + max_size=128) + + def tearDown(self): + api.kill(self.victim) + + def test_get(self): + response = httpc.get('http://localhost:31337/') + self.assert_(response == 'hello world') + + def test_get_(self): + status, msg, body = httpc.get_('http://localhost:31337/') + self.assert_(status == 200) + self.assert_(msg.dict['x-hello'] == 'hello') + self.assert_(body == 'hello world') + + +if __name__ == '__main__': + tests.main() From 673b161670577381082dba7ff2e17e787d964d77 Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Wed, 17 Oct 2007 20:52:25 -0400 Subject: [PATCH 11/79] [svn r24] Restore Which's refactoring of httpc.request --- eventlet/httpc.py | 144 ++++++++++++++++++++++------------------- eventlet/httpc_test.py | 35 +++++++++- 2 files changed, 108 insertions(+), 71 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index f7b8dd9..d293f12 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -281,62 +281,6 @@ def connect(url, use_proxy=False): return make_connection(scheme, location, use_proxy) -def request_(connection, method, url, body='', headers=None, dumper=None, loader=None, use_proxy=False, ok=None): - """Make an http request to a url, for internal use mostly. - - @param connection The connection (as returned by make_connection) to use for the request. - @param method HTTP method - @param url Full url to make request on. - @param body HTTP body, if necessary for the method. Can be any object, assuming an appropriate dumper is also provided. - @param headers Dict of header name to header value - @param dumper Method that formats the body as a string. - @param loader Method that converts the response body into an object. - @param use_proxy Set to True if the connection is to a proxy. - @param ok Set of valid response statuses. If the returned status is not in this list, an exception is thrown. 
- """ - if ok is None: - ok = (200, 201, 204) - if headers is None: - headers = {} - if not use_proxy: - scheme, location, path, params, query, id = urlparse.urlparse(url) - url = path - if query: - url += "?" + query - else: - scheme, location, path, params, query, id = urlparse.urlparse(url) - headers.update({ "host" : location }) - if scheme == 'file': - use_proxy = False - - if method in ('PUT', 'POST'): - if dumper is not None: - body = dumper(body) - # don't set content-length header because httplib does it for us in _send_request - else: - body = '' - - connection.request(method, url, body, headers) - response = connection.getresponse() - if (response.status not in ok): - klass = status_to_error_map.get(response.status, ConnectionError) - raise klass( - connection.method, connection.host, connection.port, - connection.path, response.status, response.reason, response.read()) - - body = response.read() - - if loader is not None: - try: - body = loader(body) - except Exception, e: - raise UnparseableResponse(loader, body) - - return response.status, response.msg, body - -def request(*args, **kwargs): - return request_(*args, **kwargs)[-1] - def make_safe_loader(loader): def safe_loader(what): try: @@ -352,10 +296,72 @@ class HttpSuite(object): self.loader = loader self.fallback_content_type = fallback_content_type + def request_(self, connection, method, url, body='', headers=None, dumper=None, loader=None, use_proxy=False, ok=None): + """Make an http request to a url, for internal use mostly. + + @param connection The connection (as returned by make_connection) to use for the request. + @param method HTTP method + @param url Full url to make request on. + @param body HTTP body, if necessary for the method. Can be any object, assuming an appropriate dumper is also provided. + @param headers Dict of header name to header value + @param dumper Method that formats the body as a string. + @param loader Method that converts the response body into an object. + @param use_proxy Set to True if the connection is to a proxy. + @param ok Set of valid response statuses. If the returned status is not in this list, an exception is thrown. + """ + if ok is None: + ok = (200, 201, 204) + if headers is None: + headers = {} + if not use_proxy: + scheme, location, path, params, query, id = urlparse.urlparse(url) + url = path + if query: + url += "?" 
+ query + else: + scheme, location, path, params, query, id = urlparse.urlparse(url) + headers.update({ "host" : location }) + if scheme == 'file': + use_proxy = False + + if method in ('PUT', 'POST'): + if dumper is not None: + body = dumper(body) + # don't set content-length header because httplib does it + # for us in _send_request + else: + body = '' + + response, body = self._get_response_body(connection, method, url, + body, headers, ok) + + if loader is not None: + try: + body = loader(body) + except Exception, e: + raise UnparseableResponse(loader, body) + + return response.status, response.msg, body + + def _get_response_body(self, connection, method, url, body, headers, ok): + connection.request(method, url, body, headers) + response = connection.getresponse() + if response.status not in ok: + klass = status_to_error_map.get(response.status, ConnectionError) + raise klass( + connection.method, connection.host, connection.port, + connection.path, response.status, response.reason, + response.read()) + + return response, response.read() + + def request(self, *args, **kwargs): + return self.request_(*args, **kwargs)[-1] + def head_(self, url, headers=None, use_proxy=False, ok=None): - return request_(connect(url, use_proxy), method='HEAD', url=url, - body='', headers=headers, use_proxy=use_proxy, - ok=ok) + return self.request_(connect(url, use_proxy), method='HEAD', url=url, + body='', headers=headers, use_proxy=use_proxy, + ok=ok) def head(self, *args, **kwargs): return self.head_(*args, **kwargs)[-1] @@ -364,9 +370,9 @@ class HttpSuite(object): #import pdb; pdb.Pdb().set_trace() if headers is None: headers = {} - return request_(connect(url, use_proxy), method='GET', url=url, - body='', headers=headers, loader=self.loader, - use_proxy=use_proxy, ok=ok) + return self.request_(connect(url, use_proxy), method='GET', url=url, + body='', headers=headers, loader=self.loader, + use_proxy=use_proxy, ok=ok) def get(self, *args, **kwargs): return self.get_(*args, **kwargs)[-1] @@ -378,9 +384,9 @@ class HttpSuite(object): headers['content-type'] = self.fallback_content_type else: headers['content-type'] = content_type - return request_(connect(url), method='PUT', url=url, body=data, - headers=headers, dumper=self.dumper, - loader=make_safe_loader(self.loader), ok=ok) + return self.request_(connect(url), method='PUT', url=url, body=data, + headers=headers, dumper=self.dumper, + loader=make_safe_loader(self.loader), ok=ok) def put(self, *args, **kwargs): return self.put_(*args, **kwargs)[-1] @@ -399,9 +405,9 @@ class HttpSuite(object): headers['content-type'] = self.fallback_content_type else: headers['content-type'] = content_type - return request_(connect(url), method='POST', url=url, body=data, - headers=headers, dumper=self.dumper, - loader=self.loader, ok=ok) + return self.request_(connect(url), method='POST', url=url, body=data, + headers=headers, dumper=self.dumper, + loader=self.loader, ok=ok) def post(self, *args, **kwargs): return self.post_(*args, **kwargs)[-1] @@ -424,3 +430,5 @@ post = suite.post post_ = suite.post_ put = suite.put put_ = suite.put_ +request = suite.request +request_ = suite.request_ diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index 6126baf..5654221 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -27,6 +27,10 @@ from eventlet import httpc from eventlet import httpd from eventlet import processes from eventlet import util +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO 
util.wrap_socket_with_coroutine_socket() @@ -36,10 +40,25 @@ from eventlet import tests class Site(object): - def handle_request(self, req): - req.set_header('x-hello', 'hello') + def handle_get(self, req): + req.set_header('x-get', 'hello') + resp = StringIO() + pairs = req.get_query_pairs() + if pairs: + for k,v in pairs: + resp.write(k + '=' + v + '\n') + else: + resp.write('hello world') + req.write(resp.getvalue()) + + def handle_post(self, req): + print req.read_body() + req.set_header('x-post', 'hello') req.write('hello world') + def handle_request(self, req): + return getattr(self, 'handle_%s' % req.method().lower())(req) + def adapt(self, obj, req): req.write(str(obj)) @@ -61,9 +80,19 @@ class TestHttpc(tests.TestCase): def test_get_(self): status, msg, body = httpc.get_('http://localhost:31337/') self.assert_(status == 200) - self.assert_(msg.dict['x-hello'] == 'hello') + self.assert_(msg.dict['x-get'] == 'hello') self.assert_(body == 'hello world') + def test_get_query(self): + response = httpc.get('http://localhost:31337/?foo=bar&foo=quux') + self.assert_(response == 'foo=bar\nfoo=quux\n') + + def test_post_(self): + status, msg, body = httpc.post_('http://localhost:31337/', + data='qunge') + self.assert_(status == 200) + self.assert_(msg.dict['x-post'] == 'hello') + if __name__ == '__main__': tests.main() From 1086114c203aa04b876ff716a93b70844eccceac Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Wed, 17 Oct 2007 20:54:43 -0400 Subject: [PATCH 12/79] [svn r25] Test POST roundtripping. --- eventlet/httpc_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index 5654221..da5506b 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -52,9 +52,8 @@ class Site(object): req.write(resp.getvalue()) def handle_post(self, req): - print req.read_body() req.set_header('x-post', 'hello') - req.write('hello world') + req.write(req.read_body()) def handle_request(self, req): return getattr(self, 'handle_%s' % req.method().lower())(req) @@ -88,10 +87,11 @@ class TestHttpc(tests.TestCase): self.assert_(response == 'foo=bar\nfoo=quux\n') def test_post_(self): - status, msg, body = httpc.post_('http://localhost:31337/', - data='qunge') + data = 'qunge' + status, msg, body = httpc.post_('http://localhost:31337/', data=data) self.assert_(status == 200) self.assert_(msg.dict['x-post'] == 'hello') + self.assert_(body == data) if __name__ == '__main__': From 93e5a9cfe13a46912e6fa03e703c181d6d7a9790 Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Thu, 18 Oct 2007 15:18:58 -0400 Subject: [PATCH 13/79] [svn r26] Round out httpc tests. --- eventlet/httpc_test.py | 107 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 99 insertions(+), 8 deletions(-) diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index da5506b..14de657 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -1,5 +1,5 @@ """\ -@file httpd_test.py +@file httpc_test.py @author Bryan O'Sullivan Copyright (c) 2007, Linden Research, Inc. 
@@ -40,17 +40,50 @@ from eventlet import tests class Site(object): + def __init__(self): + self.stuff = {'hello': 'hello world'} + def handle_get(self, req): req.set_header('x-get', 'hello') resp = StringIO() pairs = req.get_query_pairs() + path = req.path().lstrip('/') + try: + resp.write(self.stuff[path]) + except KeyError: + req.response(404, body='Not found') + return if pairs: for k,v in pairs: resp.write(k + '=' + v + '\n') - else: - resp.write('hello world') req.write(resp.getvalue()) + def handle_put(self, req): + req.set_header('x-put', 'hello') + path = req.path().lstrip('/') + if not path: + req.response(400, body='') + return + if path in self.stuff: + req.response(204) + else: + req.response(201) + self.stuff[path] = req.read_body() + req.write('') + + def handle_delete(self, req): + req.set_header('x-delete', 'hello') + path = req.path().lstrip('/') + if not path: + req.response(400, body='') + return + try: + del self.stuff[path] + req.response(204) + except KeyError: + req.response(404) + req.write('') + def handle_post(self, req): req.set_header('x-post', 'hello') req.write(req.read_body()) @@ -72,19 +105,23 @@ class TestHttpc(tests.TestCase): def tearDown(self): api.kill(self.victim) + def test_get_bad_uri(self): + self.assertRaises(httpc.ConnectionError, + lambda: httpc.get('http://localhost:31337/b0gu5')) + def test_get(self): - response = httpc.get('http://localhost:31337/') + response = httpc.get('http://localhost:31337/hello') self.assert_(response == 'hello world') def test_get_(self): - status, msg, body = httpc.get_('http://localhost:31337/') + status, msg, body = httpc.get_('http://localhost:31337/hello') self.assert_(status == 200) self.assert_(msg.dict['x-get'] == 'hello') self.assert_(body == 'hello world') def test_get_query(self): - response = httpc.get('http://localhost:31337/?foo=bar&foo=quux') - self.assert_(response == 'foo=bar\nfoo=quux\n') + response = httpc.get('http://localhost:31337/hello?foo=bar&foo=quux') + self.assert_(response == 'hello worldfoo=bar\nfoo=quux\n') def test_post_(self): data = 'qunge' @@ -92,7 +129,61 @@ class TestHttpc(tests.TestCase): self.assert_(status == 200) self.assert_(msg.dict['x-post'] == 'hello') self.assert_(body == data) - + def test_post(self): + data = 'qunge' + self.assert_(httpc.post('http://localhost:31337/', data=data) == data) + + def test_put_bad_uri(self): + self.assertRaises( + httpc.ConnectionError, + lambda: httpc.put('http://localhost:31337/', data='')) + + def test_put_empty(self): + httpc.put('http://localhost:31337/empty', data='') + self.assert_(httpc.get('http://localhost:31337/empty') == '') + + def test_put_nonempty(self): + data = 'nonempty' + httpc.put('http://localhost:31337/nonempty', data=data) + self.assert_(httpc.get('http://localhost:31337/nonempty') == data) + + def test_put_01_create(self): + data = 'goodbye world' + status, msg, body = httpc.put_('http://localhost:31337/goodbye', + data=data) + self.assert_(status == 201) + self.assert_(msg.dict['x-put'] == 'hello') + self.assert_(body is None) + self.assert_(httpc.get('http://localhost:31337/goodbye') == data) + + def test_put_02_modify(self): + self.test_put_01_create() + data = 'i really mean goodbye' + status = httpc.put_('http://localhost:31337/goodbye', data=data)[0] + self.assert_(status == 204) + self.assert_(httpc.get('http://localhost:31337/goodbye') == data) + + def test_delete_(self): + httpc.put('http://localhost:31337/killme', data='killme') + status, msg, body = httpc.delete_('http://localhost:31337/killme') + 
self.assert_(status == 204) + self.assertRaises( + httpc.ConnectionError, + lambda: httpc.get('http://localhost:31337/killme')) + + def test_delete(self): + httpc.put('http://localhost:31337/killme', data='killme') + self.assert_(httpc.delete('http://localhost:31337/killme') == '') + self.assertRaises( + httpc.ConnectionError, + lambda: httpc.get('http://localhost:31337/killme')) + + def test_delete_bad_uri(self): + self.assertRaises( + httpc.ConnectionError, + lambda: httpc.delete('http://localhost:31337/b0gu5')) + + if __name__ == '__main__': tests.main() From 7c04866efed1ebbebda735c98b038ccd71f64a4f Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Thu, 18 Oct 2007 17:57:13 -0400 Subject: [PATCH 14/79] [svn r27] httpc_test cleanups, suggested by which. --- eventlet/httpc.py | 25 +++++++++++++-------- eventlet/httpc_test.py | 49 +++++++++++++++++++++--------------------- 2 files changed, 41 insertions(+), 33 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index d293f12..11a1361 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -210,9 +210,14 @@ class Accepted(ConnectionError): """ 202 Accepted """ pass + +class NotModified(ConnectionError): + """ 304 Not Modified """ + pass + -class NotFound(ConnectionError): - """ 404 Not Found """ +class BadRequest(ConnectionError): + """ 400 Bad Request """ pass @@ -221,8 +226,8 @@ class Forbidden(ConnectionError): pass -class InternalServerError(ConnectionError): - """ 500 Internal Server Error """ +class NotFound(ConnectionError): + """ 404 Not Found """ pass @@ -231,17 +236,19 @@ class Gone(ConnectionError): pass -class NotModified(ConnectionError): +class InternalServerError(ConnectionError): + """ 500 Internal Server Error """ pass status_to_error_map = { - 500: InternalServerError, - 410: Gone, - 404: NotFound, - 403: Forbidden, 202: Accepted, 304: NotModified, + 400: BadRequest, + 403: Forbidden, + 404: NotFound, + 410: Gone, + 500: InternalServerError, } scheme_to_factory_map = { diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index 14de657..3c5f41a 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -106,82 +106,83 @@ class TestHttpc(tests.TestCase): api.kill(self.victim) def test_get_bad_uri(self): - self.assertRaises(httpc.ConnectionError, + self.assertRaises(httpc.NotFound, lambda: httpc.get('http://localhost:31337/b0gu5')) def test_get(self): response = httpc.get('http://localhost:31337/hello') - self.assert_(response == 'hello world') + self.assertEquals(response, 'hello world') def test_get_(self): status, msg, body = httpc.get_('http://localhost:31337/hello') - self.assert_(status == 200) - self.assert_(msg.dict['x-get'] == 'hello') - self.assert_(body == 'hello world') + self.assertEquals(status, 200) + self.assertEquals(msg.dict['x-get'], 'hello') + self.assertEquals(body, 'hello world') def test_get_query(self): response = httpc.get('http://localhost:31337/hello?foo=bar&foo=quux') - self.assert_(response == 'hello worldfoo=bar\nfoo=quux\n') + self.assertEquals(response, 'hello worldfoo=bar\nfoo=quux\n') def test_post_(self): data = 'qunge' status, msg, body = httpc.post_('http://localhost:31337/', data=data) - self.assert_(status == 200) - self.assert_(msg.dict['x-post'] == 'hello') - self.assert_(body == data) + self.assertEquals(status, 200) + self.assertEquals(msg.dict['x-post'], 'hello') + self.assertEquals(body, data) def test_post(self): data = 'qunge' - self.assert_(httpc.post('http://localhost:31337/', data=data) == data) + 
self.assertEquals(httpc.post('http://localhost:31337/', data=data), + data) def test_put_bad_uri(self): self.assertRaises( - httpc.ConnectionError, + httpc.BadRequest, lambda: httpc.put('http://localhost:31337/', data='')) def test_put_empty(self): httpc.put('http://localhost:31337/empty', data='') - self.assert_(httpc.get('http://localhost:31337/empty') == '') + self.assertEquals(httpc.get('http://localhost:31337/empty'), '') def test_put_nonempty(self): data = 'nonempty' httpc.put('http://localhost:31337/nonempty', data=data) - self.assert_(httpc.get('http://localhost:31337/nonempty') == data) + self.assertEquals(httpc.get('http://localhost:31337/nonempty'), data) def test_put_01_create(self): data = 'goodbye world' status, msg, body = httpc.put_('http://localhost:31337/goodbye', data=data) - self.assert_(status == 201) - self.assert_(msg.dict['x-put'] == 'hello') - self.assert_(body is None) - self.assert_(httpc.get('http://localhost:31337/goodbye') == data) + self.assertEquals(status, 201) + self.assertEquals(msg.dict['x-put'], 'hello') + self.assertEquals(body, None) + self.assertEquals(httpc.get('http://localhost:31337/goodbye'), data) def test_put_02_modify(self): self.test_put_01_create() data = 'i really mean goodbye' status = httpc.put_('http://localhost:31337/goodbye', data=data)[0] - self.assert_(status == 204) - self.assert_(httpc.get('http://localhost:31337/goodbye') == data) + self.assertEquals(status, 204) + self.assertEquals(httpc.get('http://localhost:31337/goodbye'), data) def test_delete_(self): httpc.put('http://localhost:31337/killme', data='killme') status, msg, body = httpc.delete_('http://localhost:31337/killme') - self.assert_(status == 204) + self.assertEquals(status, 204) self.assertRaises( - httpc.ConnectionError, + httpc.NotFound, lambda: httpc.get('http://localhost:31337/killme')) def test_delete(self): httpc.put('http://localhost:31337/killme', data='killme') - self.assert_(httpc.delete('http://localhost:31337/killme') == '') + self.assertEquals(httpc.delete('http://localhost:31337/killme'), '') self.assertRaises( - httpc.ConnectionError, + httpc.NotFound, lambda: httpc.get('http://localhost:31337/killme')) def test_delete_bad_uri(self): self.assertRaises( - httpc.ConnectionError, + httpc.NotFound, lambda: httpc.delete('http://localhost:31337/b0gu5')) From dd692a3ac8b9f5cc9690ddba9b5c869c5c0e3fb4 Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Thu, 18 Oct 2007 18:23:38 -0400 Subject: [PATCH 15/79] [svn r28] Testin da HEAD verb, yo. 
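The new tests exercise both head_ and head against the test Site. A minimal usage sketch of the pattern they cover (the localhost URL and the x-head header are just what the test fixture uses, not part of the public API):

# Assumes a server like the test Site below is listening on localhost:31337
# and sets an 'x-head' response header.
status, msg, body = httpc.head_('http://localhost:31337/hello')
assert status == 200                   # request succeeded
assert msg.dict['x-head'] == 'hello'   # response headers are reachable via msg
assert body == ''                      # HEAD responses carry no body

# The plain head() helper returns only the (empty) body.
assert httpc.head('http://localhost:31337/hello') == ''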
--- eventlet/httpc_test.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index 3c5f41a..5c0397d 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -58,6 +58,14 @@ class Site(object): resp.write(k + '=' + v + '\n') req.write(resp.getvalue()) + def handle_head(self, req): + req.set_header('x-head', 'hello') + path = req.path().lstrip('/') + try: + req.write('') + except KeyError: + req.response(404, body='Not found') + def handle_put(self, req): req.set_header('x-put', 'hello') path = req.path().lstrip('/') @@ -123,6 +131,15 @@ class TestHttpc(tests.TestCase): response = httpc.get('http://localhost:31337/hello?foo=bar&foo=quux') self.assertEquals(response, 'hello worldfoo=bar\nfoo=quux\n') + def test_head_(self): + status, msg, body = httpc.head_('http://localhost:31337/hello') + self.assertEquals(status, 200) + self.assertEquals(msg.dict['x-head'], 'hello') + self.assertEquals(body, '') + + def test_head(self): + self.assertEquals(httpc.head('http://localhost:31337/hello'), '') + def test_post_(self): data = 'qunge' status, msg, body = httpc.post_('http://localhost:31337/', data=data) From 4c5a3fe902502e3df3250e9554e1f2eee5766693 Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Mon, 22 Oct 2007 14:01:05 -0400 Subject: [PATCH 16/79] [svn r30] Handle 301 redirect responses. --- eventlet/httpc.py | 43 ++++++++++++++++++++++++++++++++++++++---- eventlet/httpc_test.py | 19 +++++++++++++++---- eventlet/httpd.py | 4 ++++ 3 files changed, 58 insertions(+), 8 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 11a1361..388d576 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -174,7 +174,10 @@ class ConnectionError(Exception): There are lots of subclasses so you can use closely-specified exception clauses.""" - def __init__(self, method, host, port, path, status, reason, body): + def __init__(self, method, host, port, path, status, reason, body, + instance=None, connection=None, url='', headers={}, + dumper=None, loader=None, use_proxy=False, ok=None, + response_headers={}): self.method = method self.host = host self.port = port @@ -182,8 +185,20 @@ class ConnectionError(Exception): self.status = status self.reason = reason self.body = body + self.instance = instance + self.connection = connection + self.url = url + self.headers = headers + self.dumper = dumper + self.loader = loader + self.use_proxy = use_proxy + self.ok = ok + self.response_headers = response_headers Exception.__init__(self) + def location(self): + return self.response_headers.get('location') + def __repr__(self): return "ConnectionError(%r, %r, %r, %r, %r, %r, %r)" % ( self.method, self.host, self.port, @@ -211,6 +226,22 @@ class Accepted(ConnectionError): pass +class Retriable(ConnectionError): + def retry_(self): + url = self.location() or self.url + return self.instance.request_( + connect(url, self.use_proxy), self.method, url, self.body, + self.headers, self.dumper, self.loader, self.use_proxy, self.ok) + + def retry(self): + return self.retry_()[-1] + + +class MovedPermanently(Retriable): + """ 301 Moved Permanently """ + pass + + class NotModified(ConnectionError): """ 304 Not Modified """ pass @@ -243,6 +274,7 @@ class InternalServerError(ConnectionError): status_to_error_map = { 202: Accepted, + 301: MovedPermanently, 304: NotModified, 400: BadRequest, 403: Forbidden, @@ -340,7 +372,8 @@ class HttpSuite(object): body = '' response, body = self._get_response_body(connection, method, url, - body, headers, 
ok) + body, headers, ok, dumper, + loader, use_proxy) if loader is not None: try: @@ -350,7 +383,8 @@ class HttpSuite(object): return response.status, response.msg, body - def _get_response_body(self, connection, method, url, body, headers, ok): + def _get_response_body(self, connection, method, url, body, headers, ok, + dumper, loader, use_proxy): connection.request(method, url, body, headers) response = connection.getresponse() if response.status not in ok: @@ -358,7 +392,8 @@ class HttpSuite(object): raise klass( connection.method, connection.host, connection.port, connection.path, response.status, response.reason, - response.read()) + response.read(), self, connection, url, headers, dumper, + loader, use_proxy, ok, response.msg.dict) return response, response.read() diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index 5c0397d..0843eb1 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -46,16 +46,14 @@ class Site(object): def handle_get(self, req): req.set_header('x-get', 'hello') resp = StringIO() - pairs = req.get_query_pairs() path = req.path().lstrip('/') try: resp.write(self.stuff[path]) except KeyError: req.response(404, body='Not found') return - if pairs: - for k,v in pairs: - resp.write(k + '=' + v + '\n') + for k,v in req.get_query_pairs(): + resp.write(k + '=' + v + '\n') req.write(resp.getvalue()) def handle_head(self, req): @@ -97,6 +95,11 @@ class Site(object): req.write(req.read_body()) def handle_request(self, req): + if req.path().startswith('/redirect/'): + url = ('http://' + req.get_header('host') + + req.uri().replace('/redirect/', '/')) + req.response(301, headers={'location': url}, body='') + return return getattr(self, 'handle_%s' % req.method().lower())(req) def adapt(self, obj, req): @@ -121,6 +124,14 @@ class TestHttpc(tests.TestCase): response = httpc.get('http://localhost:31337/hello') self.assertEquals(response, 'hello world') + def test_get_301(self): + try: + httpc.get('http://localhost:31337/redirect/hello') + self.assert_(False) + except httpc.MovedPermanently, err: + response = err.retry() + self.assertEquals(response, 'hello world') + def test_get_(self): status, msg, body = httpc.get_('http://localhost:31337/hello') self.assertEquals(status, 200) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index 61cfff9..fe0d3ba 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -76,6 +76,10 @@ class Request(object): self._reason_phrase = reason_phrase self.protocol.set_response_code(self, code, reason_phrase) if headers is not None: + try: + headers = headers.iteritems() + except AttributeError: + pass for key, value in headers: self.set_header(key, value) if body is not None: From 69834f710e71e4e8bf2add9e689aad36496de615 Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Tue, 23 Oct 2007 17:45:57 -0400 Subject: [PATCH 17/79] [svn r31] Get httpc to handle HTTP 303 status code. 
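As the tests added below illustrate, a 303 response now surfaces as an httpc.SeeOther exception whose retry() re-issues the request against the Location header. A sketch of the calling pattern, using the same fixture URL as the tests:

try:
    body = httpc.post('http://localhost:31337/redirect/hello', data='hello world')
except httpc.SeeOther, err:
    # retry() follows the Location header; for 303 the method is switched to GET.
    body = err.retry()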
--- eventlet/httpc.py | 31 +++++++-- eventlet/httpc_test.py | 142 ++++++++++++++++++++++++++++------------- 2 files changed, 123 insertions(+), 50 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 388d576..f76a744 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -177,7 +177,7 @@ class ConnectionError(Exception): def __init__(self, method, host, port, path, status, reason, body, instance=None, connection=None, url='', headers={}, dumper=None, loader=None, use_proxy=False, ok=None, - response_headers={}): + response_headers={}, req_body=''): self.method = method self.host = host self.port = port @@ -194,6 +194,7 @@ class ConnectionError(Exception): self.use_proxy = use_proxy self.ok = ok self.response_headers = response_headers + self.req_body = req_body Exception.__init__(self) def location(self): @@ -227,11 +228,18 @@ class Accepted(ConnectionError): class Retriable(ConnectionError): + def retry_method(self): + return self.method + + def retry_url(self): + return self.location() or self.url() + def retry_(self): - url = self.location() or self.url + url = self.retry_url() return self.instance.request_( - connect(url, self.use_proxy), self.method, url, self.body, - self.headers, self.dumper, self.loader, self.use_proxy, self.ok) + connect(url, self.use_proxy), self.retry_method(), url, + self.req_body, self.headers, self.dumper, self.loader, + self.use_proxy, self.ok) def retry(self): return self.retry_()[-1] @@ -242,6 +250,13 @@ class MovedPermanently(Retriable): pass +class SeeOther(Retriable): + """ 303 See Other """ + + def retry_method(self): + return 'GET' + + class NotModified(ConnectionError): """ 304 Not Modified """ pass @@ -275,6 +290,7 @@ class InternalServerError(ConnectionError): status_to_error_map = { 202: Accepted, 301: MovedPermanently, + 303: SeeOther, 304: NotModified, 400: BadRequest, 403: Forbidden, @@ -363,6 +379,7 @@ class HttpSuite(object): if scheme == 'file': use_proxy = False + orig_body = body if method in ('PUT', 'POST'): if dumper is not None: body = dumper(body) @@ -373,7 +390,7 @@ class HttpSuite(object): response, body = self._get_response_body(connection, method, url, body, headers, ok, dumper, - loader, use_proxy) + loader, use_proxy, orig_body) if loader is not None: try: @@ -384,7 +401,7 @@ class HttpSuite(object): return response.status, response.msg, body def _get_response_body(self, connection, method, url, body, headers, ok, - dumper, loader, use_proxy): + dumper, loader, use_proxy, orig_body): connection.request(method, url, body, headers) response = connection.getresponse() if response.status not in ok: @@ -393,7 +410,7 @@ class HttpSuite(object): connection.method, connection.host, connection.port, connection.path, response.status, response.reason, response.read(), self, connection, url, headers, dumper, - loader, use_proxy, ok, response.msg.dict) + loader, use_proxy, ok, response.msg.dict, orig_body) return response, response.read() diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index 0843eb1..91b212b 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -43,6 +43,14 @@ class Site(object): def __init__(self): self.stuff = {'hello': 'hello world'} + def adapt(self, obj, req): + req.write(str(obj)) + + def handle_request(self, req): + return getattr(self, 'handle_%s' % req.method().lower())(req) + + +class BasicSite(Site): def handle_get(self, req): req.set_header('x-get', 'hello') resp = StringIO() @@ -94,125 +102,173 @@ class Site(object): req.set_header('x-post', 'hello') 
req.write(req.read_body()) - def handle_request(self, req): - if req.path().startswith('/redirect/'): - url = ('http://' + req.get_header('host') + - req.uri().replace('/redirect/', '/')) - req.response(301, headers={'location': url}, body='') - return - return getattr(self, 'handle_%s' % req.method().lower())(req) - def adapt(self, obj, req): - req.write(str(obj)) +class TestBase(object): + site_class = BasicSite + def base_url(self): + return 'http://localhost:31337/' -class TestHttpc(tests.TestCase): def setUp(self): self.victim = api.spawn(httpd.server, api.tcp_listener(('0.0.0.0', 31337)), - Site(), + self.site_class(), max_size=128) def tearDown(self): api.kill(self.victim) + +class TestHttpc(TestBase, tests.TestCase): def test_get_bad_uri(self): self.assertRaises(httpc.NotFound, - lambda: httpc.get('http://localhost:31337/b0gu5')) + lambda: httpc.get(self.base_url() + 'b0gu5')) def test_get(self): - response = httpc.get('http://localhost:31337/hello') - self.assertEquals(response, 'hello world') - - def test_get_301(self): - try: - httpc.get('http://localhost:31337/redirect/hello') - self.assert_(False) - except httpc.MovedPermanently, err: - response = err.retry() + response = httpc.get(self.base_url() + 'hello') self.assertEquals(response, 'hello world') def test_get_(self): - status, msg, body = httpc.get_('http://localhost:31337/hello') + status, msg, body = httpc.get_(self.base_url() + 'hello') self.assertEquals(status, 200) self.assertEquals(msg.dict['x-get'], 'hello') self.assertEquals(body, 'hello world') def test_get_query(self): - response = httpc.get('http://localhost:31337/hello?foo=bar&foo=quux') + response = httpc.get(self.base_url() + 'hello?foo=bar&foo=quux') self.assertEquals(response, 'hello worldfoo=bar\nfoo=quux\n') def test_head_(self): - status, msg, body = httpc.head_('http://localhost:31337/hello') + status, msg, body = httpc.head_(self.base_url() + 'hello') self.assertEquals(status, 200) self.assertEquals(msg.dict['x-head'], 'hello') self.assertEquals(body, '') def test_head(self): - self.assertEquals(httpc.head('http://localhost:31337/hello'), '') + self.assertEquals(httpc.head(self.base_url() + 'hello'), '') def test_post_(self): data = 'qunge' - status, msg, body = httpc.post_('http://localhost:31337/', data=data) + status, msg, body = httpc.post_(self.base_url() + '', data=data) self.assertEquals(status, 200) self.assertEquals(msg.dict['x-post'], 'hello') self.assertEquals(body, data) def test_post(self): data = 'qunge' - self.assertEquals(httpc.post('http://localhost:31337/', data=data), + self.assertEquals(httpc.post(self.base_url() + '', data=data), data) def test_put_bad_uri(self): self.assertRaises( httpc.BadRequest, - lambda: httpc.put('http://localhost:31337/', data='')) + lambda: httpc.put(self.base_url() + '', data='')) def test_put_empty(self): - httpc.put('http://localhost:31337/empty', data='') - self.assertEquals(httpc.get('http://localhost:31337/empty'), '') + httpc.put(self.base_url() + 'empty', data='') + self.assertEquals(httpc.get(self.base_url() + 'empty'), '') def test_put_nonempty(self): data = 'nonempty' - httpc.put('http://localhost:31337/nonempty', data=data) - self.assertEquals(httpc.get('http://localhost:31337/nonempty'), data) + httpc.put(self.base_url() + 'nonempty', data=data) + self.assertEquals(httpc.get(self.base_url() + 'nonempty'), data) def test_put_01_create(self): data = 'goodbye world' - status, msg, body = httpc.put_('http://localhost:31337/goodbye', + status, msg, body = httpc.put_(self.base_url() + 'goodbye', 
data=data) self.assertEquals(status, 201) self.assertEquals(msg.dict['x-put'], 'hello') self.assertEquals(body, None) - self.assertEquals(httpc.get('http://localhost:31337/goodbye'), data) + self.assertEquals(httpc.get(self.base_url() + 'goodbye'), data) def test_put_02_modify(self): self.test_put_01_create() data = 'i really mean goodbye' - status = httpc.put_('http://localhost:31337/goodbye', data=data)[0] + status = httpc.put_(self.base_url() + 'goodbye', data=data)[0] self.assertEquals(status, 204) - self.assertEquals(httpc.get('http://localhost:31337/goodbye'), data) + self.assertEquals(httpc.get(self.base_url() + 'goodbye'), data) def test_delete_(self): - httpc.put('http://localhost:31337/killme', data='killme') - status, msg, body = httpc.delete_('http://localhost:31337/killme') + httpc.put(self.base_url() + 'killme', data='killme') + status, msg, body = httpc.delete_(self.base_url() + 'killme') self.assertEquals(status, 204) self.assertRaises( httpc.NotFound, - lambda: httpc.get('http://localhost:31337/killme')) + lambda: httpc.get(self.base_url() + 'killme')) def test_delete(self): - httpc.put('http://localhost:31337/killme', data='killme') - self.assertEquals(httpc.delete('http://localhost:31337/killme'), '') + httpc.put(self.base_url() + 'killme', data='killme') + self.assertEquals(httpc.delete(self.base_url() + 'killme'), '') self.assertRaises( httpc.NotFound, - lambda: httpc.get('http://localhost:31337/killme')) + lambda: httpc.get(self.base_url() + 'killme')) def test_delete_bad_uri(self): self.assertRaises( httpc.NotFound, - lambda: httpc.delete('http://localhost:31337/b0gu5')) + lambda: httpc.delete(self.base_url() + 'b0gu5')) +class Site301(BasicSite): + def handle_request(self, req): + if req.path().startswith('/redirect/'): + url = ('http://' + req.get_header('host') + + req.uri().replace('/redirect/', '/')) + req.response(301, headers={'location': url}, body='') + return + return Site.handle_request(self, req) + + +class Site303(BasicSite): + def handle_request(self, req): + if req.path().startswith('/redirect/'): + url = ('http://' + req.get_header('host') + + req.uri().replace('/redirect/', '/')) + req.response(303, headers={'location': url}, body='') + return + return Site.handle_request(self, req) + + +class TestHttpc301(TestBase, tests.TestCase): + site_class = Site301 + + def base_url(self): + return 'http://localhost:31337/redirect/' + + def test_get(self): + try: + httpc.get(self.base_url() + 'hello') + self.assert_(False) + except httpc.MovedPermanently, err: + response = err.retry() + self.assertEquals(response, 'hello world') + + def test_post(self): + data = 'qunge' + try: + response = httpc.post(self.base_url() + '', data=data) + self.assert_(False) + except httpc.MovedPermanently, err: + response = err.retry() + self.assertEquals(response, data) + + +class TestHttpc303(TestBase, tests.TestCase): + site_class = Site303 + + def base_url(self): + return 'http://localhost:31337/redirect/' + + def test_post(self): + data = 'hello world' + try: + response = httpc.post(self.base_url() + 'hello', data=data) + self.assert_(False) + except httpc.SeeOther, err: + response = err.retry() + self.assertEquals(response, data) + + if __name__ == '__main__': tests.main() From 00451d6de6d873a5bd243268d558e77d0d398fcb Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Tue, 23 Oct 2007 19:59:20 -0400 Subject: [PATCH 18/79] [svn r32] Support the 302 response code, "Found". 
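A 302 now surfaces as httpc.Found, another Retriable subclass, and the mx.DateTime-backed to_http_time()/from_http_time() helpers can be used to produce and interpret HTTP-dates such as the Expires header a redirect may carry. A sketch of the calling pattern, using the fixture URL from the tests and assuming the standard time module is imported:

try:
    body = httpc.get('http://localhost:31337/expired/hello')
except httpc.Found, err:
    body = err.retry()   # follow the Location header

# round-tripping an HTTP-date such as Expires
stamp = httpc.to_http_time(time.time() + 100)
seconds = httpc.from_http_time(stamp)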
--- eventlet/httpc.py | 44 ++++++++++++++++++++++++++++++++++++------ eventlet/httpc_test.py | 39 +++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 6 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index f76a744..411c571 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -31,7 +31,7 @@ import time import urlparse -from mx.DateTime import Parser +from mx import DateTime _old_HTTPConnection = httplib.HTTPConnection @@ -42,7 +42,9 @@ HTTP_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT' to_http_time = lambda t: time.strftime(HTTP_TIME_FORMAT, time.gmtime(t)) -from_http_time = lambda t: int(Parser.DateTimeFromString(t).gmticks()) +def from_http_time(t, defaultdate=None): + return int(DateTime.Parser.DateTimeFromString( + t, defaultdate=defaultdate).gmticks()) def host_and_port_from_url(url): """@brief Simple function to get host and port from an http url. @@ -200,6 +202,16 @@ class ConnectionError(Exception): def location(self): return self.response_headers.get('location') + def expired(self): + # 14.21 Expires + # + # HTTP/1.1 clients and caches MUST treat other invalid date + # formats, especially including the value "0", as in the past + # (i.e., "already expired"). + expires = from_http_time(instance.response_headers.get('expires', '0'), + defaultdate=DateTime.Epoch) + return time.time() > expires + def __repr__(self): return "ConnectionError(%r, %r, %r, %r, %r, %r, %r)" % ( self.method, self.host, self.port, @@ -250,6 +262,12 @@ class MovedPermanently(Retriable): pass +class Found(Retriable): + """ 302 Found """ + + pass + + class SeeOther(Retriable): """ 303 See Other """ @@ -290,6 +308,7 @@ class InternalServerError(ConnectionError): status_to_error_map = { 202: Accepted, 301: MovedPermanently, + 302: Found, 303: SeeOther, 304: NotModified, 400: BadRequest, @@ -407,10 +426,23 @@ class HttpSuite(object): if response.status not in ok: klass = status_to_error_map.get(response.status, ConnectionError) raise klass( - connection.method, connection.host, connection.port, - connection.path, response.status, response.reason, - response.read(), self, connection, url, headers, dumper, - loader, use_proxy, ok, response.msg.dict, orig_body) + method=connection.method, + host=connection.host, + port=connection.port, + path=connection.path, + status=response.status, + reason=response.reason, + body=response.read(), + instance=self, + connection=connection, + url=url, + headers=headers, + dumper=dumper, + loader=loader, + use_proxy=use_proxy, + ok=ok, + response_headers=response.msg.dict, + req_body=orig_body) return response, response.read() diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index 91b212b..50cdbb4 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -27,6 +27,7 @@ from eventlet import httpc from eventlet import httpd from eventlet import processes from eventlet import util +from mx import DateTime try: from cStringIO import StringIO except ImportError: @@ -220,6 +221,24 @@ class Site301(BasicSite): return Site.handle_request(self, req) +class Site302(BasicSite): + def handle_request(self, req): + if req.path().startswith('/expired/'): + url = ('http://' + req.get_header('host') + + req.uri().replace('/expired/', '/')) + headers = {'location': url, 'expires': '0'} + req.response(302, headers=headers, body='') + return + if req.path().startswith('/expires/'): + url = ('http://' + req.get_header('host') + + req.uri().replace('/expires/', '/')) + expires = (DateTime.gmt() + 100).gmticks() + headers = {'location': url, 
'expires': httpc.to_http_time(expires)} + req.response(302, headers=headers, body='') + return + return Site.handle_request(self, req) + + class Site303(BasicSite): def handle_request(self, req): if req.path().startswith('/redirect/'): @@ -254,6 +273,26 @@ class TestHttpc301(TestBase, tests.TestCase): self.assertEquals(response, data) +class TestHttpc302(TestBase, tests.TestCase): + site_class = Site302 + + def test_get_expired(self): + try: + httpc.get(self.base_url() + 'expired/hello') + self.assert_(False) + except httpc.Found, err: + response = err.retry() + self.assertEquals(response, 'hello world') + + def test_get_expires(self): + try: + httpc.get(self.base_url() + 'expires/hello') + self.assert_(False) + except httpc.Found, err: + response = err.retry() + self.assertEquals(response, 'hello world') + + class TestHttpc303(TestBase, tests.TestCase): site_class = Site303 From af2682d7592d69ef469cfc29f2a0e12be00d8d8b Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Thu, 25 Oct 2007 13:24:54 -0400 Subject: [PATCH 19/79] [svn r33] HTTP response code 307 - Temporary Redirect --- eventlet/httpc.py | 6 ++++++ eventlet/httpc_test.py | 40 ++++++++++++++++++++++++++++++---------- 2 files changed, 36 insertions(+), 10 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 411c571..e0621d7 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -279,6 +279,11 @@ class NotModified(ConnectionError): """ 304 Not Modified """ pass + +class TemporaryRedirect(Retriable): + """ 307 Temporary Redirect """ + pass + class BadRequest(ConnectionError): """ 400 Bad Request """ @@ -311,6 +316,7 @@ status_to_error_map = { 302: Found, 303: SeeOther, 304: NotModified, + 307: TemporaryRedirect, 400: BadRequest, 403: Forbidden, 404: NotFound, diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index 50cdbb4..a8f4ecb 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -211,15 +211,21 @@ class TestHttpc(TestBase, tests.TestCase): lambda: httpc.delete(self.base_url() + 'b0gu5')) -class Site301(BasicSite): +class RedirectSite(BasicSite): + response_code = 301 + def handle_request(self, req): if req.path().startswith('/redirect/'): url = ('http://' + req.get_header('host') + req.uri().replace('/redirect/', '/')) - req.response(301, headers={'location': url}, body='') + req.response(self.response_code, headers={'location': url}, + body='') return return Site.handle_request(self, req) +class Site301(RedirectSite): + pass + class Site302(BasicSite): def handle_request(self, req): @@ -239,14 +245,12 @@ class Site302(BasicSite): return Site.handle_request(self, req) -class Site303(BasicSite): - def handle_request(self, req): - if req.path().startswith('/redirect/'): - url = ('http://' + req.get_header('host') + - req.uri().replace('/redirect/', '/')) - req.response(303, headers={'location': url}, body='') - return - return Site.handle_request(self, req) +class Site303(RedirectSite): + response_code = 303 + + +class Site307(RedirectSite): + response_code = 307 class TestHttpc301(TestBase, tests.TestCase): @@ -309,5 +313,21 @@ class TestHttpc303(TestBase, tests.TestCase): self.assertEquals(response, data) +class TestHttpc307(TestBase, tests.TestCase): + site_class = Site307 + + def base_url(self): + return 'http://localhost:31337/redirect/' + + def test_post(self): + data = 'hello world' + try: + response = httpc.post(self.base_url() + 'hello', data=data) + self.assert_(False) + except httpc.TemporaryRedirect, err: + response = err.retry() + self.assertEquals(response, data) + + 
if __name__ == '__main__': tests.main() From 844d71caa84efd606bb2849a1f2bc3eaa3012bc7 Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Thu, 25 Oct 2007 14:35:42 -0400 Subject: [PATCH 20/79] [svn r34] Propagate keyword arguments around more thoroughly. --- eventlet/httpc.py | 43 +++++++++++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index e0621d7..981d2e2 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -376,7 +376,8 @@ class HttpSuite(object): self.loader = loader self.fallback_content_type = fallback_content_type - def request_(self, connection, method, url, body='', headers=None, dumper=None, loader=None, use_proxy=False, ok=None): + def request_(self, connection, method, url, body='', headers=None, + dumper=None, loader=None, use_proxy=False, ok=None, **kwargs): """Make an http request to a url, for internal use mostly. @param connection The connection (as returned by make_connection) to use for the request. @@ -415,7 +416,8 @@ class HttpSuite(object): response, body = self._get_response_body(connection, method, url, body, headers, ok, dumper, - loader, use_proxy, orig_body) + loader, use_proxy, orig_body, + **kwargs) if loader is not None: try: @@ -425,10 +427,8 @@ class HttpSuite(object): return response.status, response.msg, body - def _get_response_body(self, connection, method, url, body, headers, ok, - dumper, loader, use_proxy, orig_body): - connection.request(method, url, body, headers) - response = connection.getresponse() + def _check_status(self, connection, response, url, headers, dumper, loader, + use_proxy, ok, orig_body, **kwargs): if response.status not in ok: klass = status_to_error_map.get(response.status, ConnectionError) raise klass( @@ -450,31 +450,39 @@ class HttpSuite(object): response_headers=response.msg.dict, req_body=orig_body) + def _get_response_body(self, connection, method, url, body, headers, ok, + dumper, loader, use_proxy, orig_body, **kwargs): + connection.request(method, url, body, headers) + response = connection.getresponse() + self._check_status(connection, response, url, headers, dumper, loader, + use_proxy, ok, orig_body, **kwargs) + return response, response.read() def request(self, *args, **kwargs): return self.request_(*args, **kwargs)[-1] - def head_(self, url, headers=None, use_proxy=False, ok=None): + def head_(self, url, headers=None, use_proxy=False, ok=None, **kwargs): return self.request_(connect(url, use_proxy), method='HEAD', url=url, body='', headers=headers, use_proxy=use_proxy, - ok=ok) + ok=ok, **kwargs) def head(self, *args, **kwargs): return self.head_(*args, **kwargs)[-1] - def get_(self, url, headers=None, use_proxy=False, ok=None): + def get_(self, url, headers=None, use_proxy=False, ok=None, **kwargs): #import pdb; pdb.Pdb().set_trace() if headers is None: headers = {} return self.request_(connect(url, use_proxy), method='GET', url=url, body='', headers=headers, loader=self.loader, - use_proxy=use_proxy, ok=ok) + use_proxy=use_proxy, ok=ok, **kwargs) def get(self, *args, **kwargs): return self.get_(*args, **kwargs)[-1] - def put_(self, url, data, headers=None, content_type=None, ok=None): + def put_(self, url, data, headers=None, content_type=None, ok=None, + **kwargs): if headers is None: headers = {} if content_type is None: @@ -483,18 +491,21 @@ class HttpSuite(object): headers['content-type'] = content_type return self.request_(connect(url), method='PUT', url=url, body=data, headers=headers, dumper=self.dumper, - 
loader=make_safe_loader(self.loader), ok=ok) + loader=make_safe_loader(self.loader), ok=ok, + **kwargs) def put(self, *args, **kwargs): return self.put_(*args, **kwargs)[-1] - def delete_(self, url, ok=None): - return request_(connect(url), method='DELETE', url=url, ok=ok) + def delete_(self, url, ok=None, **kwargs): + return request_(connect(url), method='DELETE', url=url, ok=ok, + **kwargs) def delete(self, *args, **kwargs): return self.delete_(*args, **kwargs)[-1] - def post_(self, url, data='', headers=None, content_type=None, ok=None): + def post_(self, url, data='', headers=None, content_type=None, ok=None, + **kwargs): if headers is None: headers = {} if 'content-type' in headers: @@ -504,7 +515,7 @@ class HttpSuite(object): headers['content-type'] = content_type return self.request_(connect(url), method='POST', url=url, body=data, headers=headers, dumper=self.dumper, - loader=self.loader, ok=ok) + loader=self.loader, ok=ok, **kwargs) def post(self, *args, **kwargs): return self.post_(*args, **kwargs)[-1] From 966ab3ff4a241cc30d104afa6ef652d2a76b79ba Mon Sep 17 00:00:00 2001 From: "donovan.linden" Date: Sat, 27 Oct 2007 03:49:23 -0400 Subject: [PATCH 21/79] [svn r37] Bugfix in httpc with setting content-type; move more greenlet accesses into api; add coros.execute which is like api.spawn but returns an event object for the return value of the function --- eventlet/api.py | 1 + eventlet/coros.py | 14 +++++++++++--- eventlet/httpc.py | 11 ++++++----- eventlet/httpd.py | 1 + eventlet/jsonhttp.py | 3 ++- 5 files changed, 21 insertions(+), 9 deletions(-) diff --git a/eventlet/api.py b/eventlet/api.py index 18bb4fb..e9877fa 100644 --- a/eventlet/api.py +++ b/eventlet/api.py @@ -231,6 +231,7 @@ def sleep(timeout=0): switch = greenlib.switch getcurrent = greenlet.getcurrent +GreenletExit = greenlet.GreenletExit class Spew(object): diff --git a/eventlet/coros.py b/eventlet/coros.py index d430969..9f02312 100644 --- a/eventlet/coros.py +++ b/eventlet/coros.py @@ -24,7 +24,6 @@ THE SOFTWARE. import time import traceback -import greenlet from eventlet import api @@ -62,7 +61,7 @@ class event(object): occured. """ if self._result is NOT_USED: - self._waiters[greenlet.getcurrent()] = True + self._waiters[api.getcurrent()] = True return api.get_hub().switch() if self._exc is not None: raise self._exc @@ -93,6 +92,15 @@ class event(object): for waiter in self._waiters: hub.schedule_call(0, greenlib.switch, waiter, self._result) + +def execute(func, *args, **kw): + evt = event() + def _really_execute(): + evt.send(func(*args, **kw)) + api.spawn(_really_execute) + return evt + + class CoroutinePool(pools.Pool): """ Like a thread pool, but with coroutines. 
""" def _main_loop(self, sender): @@ -104,7 +112,7 @@ class CoroutinePool(pools.Pool): result = func(*args, **kw) if evt is not None: evt.send(result) - except greenlet.GreenletExit: + except api.GreenletExit: pass except Exception, e: traceback.print_exc() diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 981d2e2..3d68242 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -485,10 +485,11 @@ class HttpSuite(object): **kwargs): if headers is None: headers = {} - if content_type is None: - headers['content-type'] = self.fallback_content_type - else: - headers['content-type'] = content_type + if 'content-type' not in headers: + if content_type is None: + headers['content-type'] = self.fallback_content_type + else: + headers['content-type'] = content_type return self.request_(connect(url), method='PUT', url=url, body=data, headers=headers, dumper=self.dumper, loader=make_safe_loader(self.loader), ok=ok, @@ -508,7 +509,7 @@ class HttpSuite(object): **kwargs): if headers is None: headers = {} - if 'content-type' in headers: + if 'content-type' not in headers: if content_type is None: headers['content-type'] = self.fallback_content_type else: diff --git a/eventlet/httpd.py b/eventlet/httpd.py index fe0d3ba..e47d2ab 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -405,6 +405,7 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): self.close_connection = True continue + self._code = 200 request = Request(self, self.command, self.path, self.headers) request.set_header('Server', self.version_string()) request.set_header('Date', self.date_time_string()) diff --git a/eventlet/jsonhttp.py b/eventlet/jsonhttp.py index 3a22d0e..4228a39 100644 --- a/eventlet/jsonhttp.py +++ b/eventlet/jsonhttp.py @@ -29,4 +29,5 @@ import simplejson suite = httpc.HttpSuite(simplejson.dumps, simplejson.loads, 'application/json') head, get, put, delete, post = ( suite.head, suite.get, suite.put, suite.delete, suite.post) - +head_, get_, put_, delete_, post_ = ( + suite.head_, suite.get_, suite.put_, suite.delete_, suite.post_) From 0c6e6b7fa3deee60887f58e0953776cfa6efdd77 Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Tue, 30 Oct 2007 19:36:20 -0400 Subject: [PATCH 22/79] [svn r39] Allow response handlers to throw an ErrorResponse. 
--- eventlet/httpd.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index e47d2ab..c30d30f 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -45,6 +45,22 @@ USE_ACCESS_LOG = True CONNECTION_CLOSED = (errno.EPIPE, errno.ECONNRESET) +class ErrorResponse(Exception): + _responses = BaseHTTPServer.BaseHTTPRequestHandler.responses + + def __init__(self, code, reason_phrase=None, headers=None, body=None): + self.code = code + if reason_phrase is None: + self.reason = self._responses[code][0] + else: + self.reason = reason_phrase + self.headers = headers + if body is None: + self.body = self._responses[code][1] + else: + self.body = body + + class Request(object): _method = None _path = None @@ -415,7 +431,13 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): pass try: - self.server.site.handle_request(request) + try: + self.server.site.handle_request(request) + except ErrorResponse, err: + request.response(code=err.code, + reason_phrase=err.reason, + headers=err.headers, + body=err.body) # throw an exception if it failed to write a body if not request.response_written(): raise NotImplementedError("Handler failed to write response to request: %s" % request) From de6bcea6261e84831ac73b8cbe6ca97ef77d780d Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Wed, 31 Oct 2007 21:44:37 -0400 Subject: [PATCH 23/79] [svn r40] Initialize base Exception object so that str(ErrorResponse) doesn't itself throw an exception. --- eventlet/httpd.py | 1 + 1 file changed, 1 insertion(+) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index c30d30f..e4d3bea 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -49,6 +49,7 @@ class ErrorResponse(Exception): _responses = BaseHTTPServer.BaseHTTPRequestHandler.responses def __init__(self, code, reason_phrase=None, headers=None, body=None): + Exception.__init__(self, reason_phrase) self.code = code if reason_phrase is None: self.reason = self._responses[code][0] From cf2247135c98df45c2247d4119b28214bd8ebf17 Mon Sep 17 00:00:00 2001 From: "donovan.linden" Date: Mon, 5 Nov 2007 15:47:05 -0500 Subject: [PATCH 24/79] [svn r41] yak shaving to make the horribly ugly httpc exceptions look pretty http://twitter.com/donovanpreston/statuses/383868912 --- eventlet/httpc.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 3d68242..f58a30f 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -307,7 +307,25 @@ class Gone(ConnectionError): class InternalServerError(ConnectionError): """ 500 Internal Server Error """ - pass + def __repr__(self): + try: + import simplejson + body = simplejson.loads(self.body) + except: + traceback = self.body + else: + traceback = "Traceback (most recent call last):\n" + for frame in body['stack-trace']: + traceback += ' File "%s", line %s, in %s\n' % ( + frame['filename'], frame['lineno'], frame['method']) + for line in frame['code']: + if line['lineno'] == frame['lineno']: + traceback += ' %s' % (line['line'].lstrip(), ) + break + traceback += body['description'] + return "The server raised an exception from our request:\n%s %s\n%s %s\n%s" % ( + self.method, self.url, self.status, self.reason, traceback) + __str__ = __repr__ status_to_error_map = { From 865994d2e6621001cc6e7c17528e0d145c36d170 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 5 Nov 2007 19:40:04 -0500 Subject: [PATCH 25/79] [svn r42] Piped print statements into the server log so that you 
can configurably turn them off. --- eventlet/httpd.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index e4d3bea..56ecee2 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -309,7 +309,7 @@ class Request(object): def error(self, response=None, body=None, log_traceback=True): if log_traceback: - traceback.print_exc() + traceback.print_exc(file=self.log) if response is None: response = 500 if body is None: @@ -322,7 +322,7 @@ class Request(object): try: produce(body, self) except Exception, e: - traceback.print_exc() + traceback.print_exc(file=self.log) if not self.response_written(): self.write('Internal Server Error') @@ -484,7 +484,7 @@ class Server(BaseHTTPServer.HTTPServer): self.log.write(message) def log_exception(self, type, value, tb): - print ''.join(traceback.format_exception(type, value, tb)) + self.log.write(''.join(traceback.format_exception(type, value, tb))) def write_access_log_line(self, *args): """Write a line to the access.log. Arguments: @@ -498,7 +498,7 @@ def server(sock, site, log=None, max_size=512): pool = coros.CoroutinePool(max_size=max_size) serv = Server(sock, sock.getsockname(), site, log) try: - print "httpd starting up on", sock.getsockname() + serv.log.write("httpd starting up on %s\n" % (sock.getsockname(), )) while True: try: new_sock, address = sock.accept() @@ -506,7 +506,7 @@ def server(sock, site, log=None, max_size=512): pool.execute_async(proto.handle) except KeyboardInterrupt: api.get_hub().remove_descriptor(sock.fileno()) - print "httpd exiting" + serv.log.write("httpd exiting\n") break finally: try: From 4faa4714a32faf88861d3ad36e52348d5423a9dd Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Tue, 6 Nov 2007 19:52:08 -0500 Subject: [PATCH 26/79] [svn r43] Clean up the internal APIs. Reviewed by Which. --- eventlet/httpc.py | 241 ++++++++++++++++++++--------------------- eventlet/httpc_test.py | 2 +- 2 files changed, 119 insertions(+), 124 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index f58a30f..61f3c37 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -171,36 +171,58 @@ class FileScheme(object): self.path, self.status, self.reason, '') +class _Params(object): + def __init__(self, url, method, body='', headers=None, dumper=None, + loader=None, use_proxy=False, ok=(), aux=None): + ''' + @param connection The connection (as returned by make_connection) to use for the request. + @param method HTTP method + @param url Full url to make request on. + @param body HTTP body, if necessary for the method. Can be any object, assuming an appropriate dumper is also provided. + @param headers Dict of header name to header value + @param dumper Method that formats the body as a string. + @param loader Method that converts the response body into an object. + @param use_proxy Set to True if the connection is to a proxy. + @param ok Set of valid response statuses. If the returned status is not in this list, an exception is thrown. 
+ ''' + self.instance = None + self.url = url + self.path = url + self.method = method + self.body = body + if headers is None: + self.headers = {} + else: + self.headers = headers + self.dumper = dumper + self.loader = loader + self.use_proxy = use_proxy + self.ok = ok or (200, 201, 204) + self.orig_body = body + self.aux = aux + + +class _LocalParams(_Params): + def __init__(self, params, **kwargs): + self._delegate = params + for k, v in kwargs.iteritems(): + setattr(self, k, v) + + def __getattr__(self, key): + return getattr(self._delegate, key) + + class ConnectionError(Exception): """Detailed exception class for reporting on http connection problems. There are lots of subclasses so you can use closely-specified exception clauses.""" - def __init__(self, method, host, port, path, status, reason, body, - instance=None, connection=None, url='', headers={}, - dumper=None, loader=None, use_proxy=False, ok=None, - response_headers={}, req_body=''): - self.method = method - self.host = host - self.port = port - self.path = path - self.status = status - self.reason = reason - self.body = body - self.instance = instance - self.connection = connection - self.url = url - self.headers = headers - self.dumper = dumper - self.loader = loader - self.use_proxy = use_proxy - self.ok = ok - self.response_headers = response_headers - self.req_body = req_body + def __init__(self, params): + self.params = params Exception.__init__(self) def location(self): - return self.response_headers.get('location') + return self.params.response.msg.dict.get('location') def expired(self): # 14.21 Expires @@ -208,14 +230,16 @@ class ConnectionError(Exception): # HTTP/1.1 clients and caches MUST treat other invalid date # formats, especially including the value "0", as in the past # (i.e., "already expired"). - expires = from_http_time(instance.response_headers.get('expires', '0'), - defaultdate=DateTime.Epoch) + expires = from_http_time( + self.params.response_headers.get('expires', '0'), + defaultdate=DateTime.Epoch) return time.time() > expires def __repr__(self): - return "ConnectionError(%r, %r, %r, %r, %r, %r, %r)" % ( - self.method, self.host, self.port, - self.path, self.status, self.reason, self.body) + response = self.params.response + return "%s(url=%r, method=%r, status=%r, reason=%r, body=%r)" % ( + self.__class__.__name__, self.params.url, self.params.method, + response.status, response.reason, self.params.body) __str__ = __repr__ @@ -241,17 +265,16 @@ class Accepted(ConnectionError): class Retriable(ConnectionError): def retry_method(self): - return self.method + return self.params.method def retry_url(self): return self.location() or self.url() def retry_(self): - url = self.retry_url() - return self.instance.request_( - connect(url, self.use_proxy), self.retry_method(), url, - self.req_body, self.headers, self.dumper, self.loader, - self.use_proxy, self.ok) + params = _LocalParams(self.params, + url=self.retry_url(), + method=self.retry_method()) + return self.params.instance.request_(params) def retry(self): return self.retry_()[-1] @@ -375,15 +398,19 @@ def make_connection(scheme, location, use_proxy): def connect(url, use_proxy=False): """ Create a connection object to the host specified in a url. 
Convenience function for make_connection.""" - scheme, location, path, params, query, id = urlparse.urlparse(url) + scheme, location = urlparse.urlparse(url)[:2] return make_connection(scheme, location, use_proxy) def make_safe_loader(loader): + if not callable(loader): + return loader def safe_loader(what): try: return loader(what) - except Exception, e: + except Exception: + import traceback + traceback.print_exc() return None return safe_loader @@ -394,113 +421,84 @@ class HttpSuite(object): self.loader = loader self.fallback_content_type = fallback_content_type - def request_(self, connection, method, url, body='', headers=None, - dumper=None, loader=None, use_proxy=False, ok=None, **kwargs): - """Make an http request to a url, for internal use mostly. + def request_(self, params): + '''Make an http request to a url, for internal use mostly.''' - @param connection The connection (as returned by make_connection) to use for the request. - @param method HTTP method - @param url Full url to make request on. - @param body HTTP body, if necessary for the method. Can be any object, assuming an appropriate dumper is also provided. - @param headers Dict of header name to header value - @param dumper Method that formats the body as a string. - @param loader Method that converts the response body into an object. - @param use_proxy Set to True if the connection is to a proxy. - @param ok Set of valid response statuses. If the returned status is not in this list, an exception is thrown. - """ - if ok is None: - ok = (200, 201, 204) - if headers is None: - headers = {} - if not use_proxy: - scheme, location, path, params, query, id = urlparse.urlparse(url) - url = path - if query: - url += "?" + query - else: - scheme, location, path, params, query, id = urlparse.urlparse(url) - headers.update({ "host" : location }) + params = _LocalParams(params, instance=self) + + (scheme, location, path, parameters, query, + fragment) = urlparse.urlparse(params.url) + + if params.use_proxy: if scheme == 'file': - use_proxy = False + params.use_proxy = False + else: + params.headers['host'] = location - orig_body = body - if method in ('PUT', 'POST'): - if dumper is not None: - body = dumper(body) + if not params.use_proxy: + params.path = path + if query: + params.path += '?' 
+ query + + params.orig_body = params.body + + if params.method in ('PUT', 'POST'): + if params.dumper is not None: + params.body = params.dumper(params.body) # don't set content-length header because httplib does it # for us in _send_request else: - body = '' + params.body = '' - response, body = self._get_response_body(connection, method, url, - body, headers, ok, dumper, - loader, use_proxy, orig_body, - **kwargs) + params.response, params.response_body = self._get_response_body(params) + response, body = params.response, params.response_body - if loader is not None: + if params.loader is not None: try: - body = loader(body) + body = params.loader(body) except Exception, e: - raise UnparseableResponse(loader, body) + raise UnparseableResponse(params.loader, body) return response.status, response.msg, body - def _check_status(self, connection, response, url, headers, dumper, loader, - use_proxy, ok, orig_body, **kwargs): - if response.status not in ok: + def _check_status(self, params): + response = params.response + if response.status not in params.ok: klass = status_to_error_map.get(response.status, ConnectionError) - raise klass( - method=connection.method, - host=connection.host, - port=connection.port, - path=connection.path, - status=response.status, - reason=response.reason, - body=response.read(), - instance=self, - connection=connection, - url=url, - headers=headers, - dumper=dumper, - loader=loader, - use_proxy=use_proxy, - ok=ok, - response_headers=response.msg.dict, - req_body=orig_body) + raise klass(params) - def _get_response_body(self, connection, method, url, body, headers, ok, - dumper, loader, use_proxy, orig_body, **kwargs): - connection.request(method, url, body, headers) - response = connection.getresponse() - self._check_status(connection, response, url, headers, dumper, loader, - use_proxy, ok, orig_body, **kwargs) + def _get_response_body(self, params): + connection = connect(params.url, params.use_proxy) + connection.request(params.method, params.path, params.body, + params.headers) + params.response = connection.getresponse() + params.response_body = params.response.read() + self._check_status(params) - return response, response.read() + return params.response, params.response_body - def request(self, *args, **kwargs): - return self.request_(*args, **kwargs)[-1] + def request(self, params): + return self.request_(params)[-1] - def head_(self, url, headers=None, use_proxy=False, ok=None, **kwargs): - return self.request_(connect(url, use_proxy), method='HEAD', url=url, - body='', headers=headers, use_proxy=use_proxy, - ok=ok, **kwargs) + def head_(self, url, headers=None, use_proxy=False, ok=None, aux=None): + return self.request_(_Params(url, 'HEAD', headers=headers, + use_proxy=use_proxy, ok=ok, aux=aux)) def head(self, *args, **kwargs): return self.head_(*args, **kwargs)[-1] - def get_(self, url, headers=None, use_proxy=False, ok=None, **kwargs): - #import pdb; pdb.Pdb().set_trace() + def get_(self, url, headers=None, use_proxy=False, ok=None, aux=None): if headers is None: headers = {} - return self.request_(connect(url, use_proxy), method='GET', url=url, - body='', headers=headers, loader=self.loader, - use_proxy=use_proxy, ok=ok, **kwargs) + return self.request_(_Params(url, 'GET', headers=headers, + loader=make_safe_loader(self.loader), + use_proxy=use_proxy, ok=ok, aux=aux)) def get(self, *args, **kwargs): return self.get_(*args, **kwargs)[-1] def put_(self, url, data, headers=None, content_type=None, ok=None, - **kwargs): + aux=None): if headers is 
None: headers = {} if 'content-type' not in headers: @@ -508,23 +506,20 @@ class HttpSuite(object): headers['content-type'] = self.fallback_content_type else: headers['content-type'] = content_type - return self.request_(connect(url), method='PUT', url=url, body=data, - headers=headers, dumper=self.dumper, - loader=make_safe_loader(self.loader), ok=ok, - **kwargs) + return self.request_(_Params(url, 'PUT', body=data, headers=headers, + ok=ok, aux=aux)) def put(self, *args, **kwargs): return self.put_(*args, **kwargs)[-1] - def delete_(self, url, ok=None, **kwargs): - return request_(connect(url), method='DELETE', url=url, ok=ok, - **kwargs) + def delete_(self, url, ok=None, aux=None): + return request_(_Params(url, 'DELETE', ok=ok, aux=aux)) def delete(self, *args, **kwargs): return self.delete_(*args, **kwargs)[-1] def post_(self, url, data='', headers=None, content_type=None, ok=None, - **kwargs): + aux=None): if headers is None: headers = {} if 'content-type' not in headers: @@ -532,9 +527,9 @@ class HttpSuite(object): headers['content-type'] = self.fallback_content_type else: headers['content-type'] = content_type - return self.request_(connect(url), method='POST', url=url, body=data, + return self.request_(_Params(url, 'POST', body=data, headers=headers, dumper=self.dumper, - loader=self.loader, ok=ok, **kwargs) + loader=self.loader, ok=ok, aux=aux)) def post(self, *args, **kwargs): return self.post_(*args, **kwargs)[-1] diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index a8f4ecb..c433021 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -180,7 +180,7 @@ class TestHttpc(TestBase, tests.TestCase): data=data) self.assertEquals(status, 201) self.assertEquals(msg.dict['x-put'], 'hello') - self.assertEquals(body, None) + self.assertEquals(body, '') self.assertEquals(httpc.get(self.base_url() + 'goodbye'), data) def test_put_02_modify(self): From 29f8783a6c0298b4eb41301c0398ac3055a87050 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Thu, 8 Nov 2007 20:33:20 -0500 Subject: [PATCH 27/79] [svn r45] httpc.InternalServerError was doing its own repr that needed to pick up the params refactoring. Unit test shows that it works now. 
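For example (hypothetical URL, not part of this change), printing the exception is what the fix restores:

    from eventlet import httpc

    try:
        httpc.get('http://localhost:31337/broken')
    except httpc.InternalServerError, e:
        # str()/repr() now read e.params.response_body and the status and
        # reason through e.params.response, instead of the attributes
        # removed in the params refactoring.
        print str(e)
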
--- eventlet/httpc.py | 7 ++++--- eventlet/httpc_test.py | 22 ++++++++++++++++++++++ 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 61f3c37..a0b55e3 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -333,9 +333,9 @@ class InternalServerError(ConnectionError): def __repr__(self): try: import simplejson - body = simplejson.loads(self.body) + body = simplejson.loads(self.params.response_body) except: - traceback = self.body + traceback = self.params.response_body else: traceback = "Traceback (most recent call last):\n" for frame in body['stack-trace']: @@ -347,10 +347,11 @@ class InternalServerError(ConnectionError): break traceback += body['description'] return "The server raised an exception from our request:\n%s %s\n%s %s\n%s" % ( - self.method, self.url, self.status, self.reason, traceback) + self.params.method, self.params.url, self.params.response.status, self.params.response.reason, traceback) __str__ = __repr__ + status_to_error_map = { 202: Accepted, 301: MovedPermanently, diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index c433021..b319b9f 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -329,5 +329,27 @@ class TestHttpc307(TestBase, tests.TestCase): self.assertEquals(response, data) +class Site500(BasicSite): + def handle_request(self, req): + req.response(500, body="screw you world") + return + +class TestHttpc500(TestBase, tests.TestCase): + site_class = Site500 + + def base_url(self): + return 'http://localhost:31337/' + + def test_get(self): + data = 'screw you world' + try: + response = httpc.get(self.base_url()) + self.fail() + except httpc.InternalServerError, e: + self.assertEquals(e.params.response_body, data) + self.assert_(str(e).count(data)) + self.assert_(repr(e).count(data)) + + if __name__ == '__main__': tests.main() From 89de4aaf86a7cae9d05cd5f3bb4f9be4512ef2b3 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Sat, 10 Nov 2007 20:30:04 -0500 Subject: [PATCH 28/79] [svn r46] Removed dependency on mxDateTime, though it is used if present. Added unit test for date/time parsing. --- eventlet/httpc.py | 30 ++++++++++++++++++++++-------- eventlet/httpc_test.py | 20 +++++++++++++++++--- 2 files changed, 39 insertions(+), 11 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index a0b55e3..063c0b4 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -31,20 +31,34 @@ import time import urlparse -from mx import DateTime - - _old_HTTPConnection = httplib.HTTPConnection _old_HTTPSConnection = httplib.HTTPSConnection HTTP_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT' - - to_http_time = lambda t: time.strftime(HTTP_TIME_FORMAT, time.gmtime(t)) -def from_http_time(t, defaultdate=None): - return int(DateTime.Parser.DateTimeFromString( - t, defaultdate=defaultdate).gmticks()) + +try: + + from mx import DateTime + def from_http_time(t, defaultdate=None): + return int(DateTime.Parser.DateTimeFromString( + t, defaultdate=defaultdate).gmticks()) + +except ImportError: + + import calendar + parse_formats = (HTTP_TIME_FORMAT, # RFC 1123 + '%A, %d-%b-%y %H:%M:%S GMT', # RFC 850 + '%a %b %d %H:%M:%S %Y') # asctime + def from_http_time(t, defaultdate=None): + for parser in parse_formats: + try: + return calendar.timegm(time.strptime(t, parser)) + except ValueError: + continue + return defaultdate + def host_and_port_from_url(url): """@brief Simple function to get host and port from an http url. 
diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index b319b9f..c3f0c97 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -27,7 +27,7 @@ from eventlet import httpc from eventlet import httpd from eventlet import processes from eventlet import util -from mx import DateTime +import time try: from cStringIO import StringIO except ImportError: @@ -111,9 +111,11 @@ class TestBase(object): return 'http://localhost:31337/' def setUp(self): + self.logfile = StringIO() self.victim = api.spawn(httpd.server, api.tcp_listener(('0.0.0.0', 31337)), self.site_class(), + log=self.logfile, max_size=128) def tearDown(self): @@ -238,7 +240,7 @@ class Site302(BasicSite): if req.path().startswith('/expires/'): url = ('http://' + req.get_header('host') + req.uri().replace('/expires/', '/')) - expires = (DateTime.gmt() + 100).gmticks() + expires = time.time() + (100 * 24 * 60 * 60) headers = {'location': url, 'expires': httpc.to_http_time(expires)} req.response(302, headers=headers, body='') return @@ -349,7 +351,19 @@ class TestHttpc500(TestBase, tests.TestCase): self.assertEquals(e.params.response_body, data) self.assert_(str(e).count(data)) self.assert_(repr(e).count(data)) - + +class TestHttpTime(tests.TestCase): + rfc1123_time = 'Sun, 06 Nov 1994 08:49:37 GMT' + rfc850_time = 'Sunday, 06-Nov-94 08:49:37 GMT' + asctime_time = 'Sun Nov 6 08:49:37 1994' + secs_since_epoch = 784111777 + def test_to_http_time(self): + self.assertEqual(self.rfc1123_time, httpc.to_http_time(self.secs_since_epoch)) + + def test_from_http_time(self): + for formatted in (self.rfc1123_time, self.rfc850_time, self.asctime_time): + ticks = httpc.from_http_time(formatted, 0) + self.assertEqual(ticks, self.secs_since_epoch) if __name__ == '__main__': tests.main() From efc20fdf10c7595077fee6e850701547966017ae Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Thu, 15 Nov 2007 17:59:31 -0500 Subject: [PATCH 29/79] [svn r47] Solved the problem of switching coros from within an exception handler. One casualty was that we lost the ability to perform a bare raise after a switch, but that's not a huge deal since the exception handler can save of the exc_info if it really wants to re-raise. We'll be working on an extension module to restore the exc_info state so that even this limitation is eliminated. Paired by Which, Donovan, and Chet. --- eventlet/greenlib.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/eventlet/greenlib.py b/eventlet/greenlib.py index 0fdead5..d9367b7 100644 --- a/eventlet/greenlib.py +++ b/eventlet/greenlib.py @@ -306,6 +306,9 @@ def switch(other=None, value=None, exc=None): if not (other or hasattr(other, 'run')): raise SwitchingToDeadGreenlet("Switching to dead greenlet %r %r %r" % (other, value, exc)) _greenlet_context_call('swap_out') + running_exc = sys.exc_info() + if running_exc[0] != None: # see if we're in the middle of an exception handler + sys.exc_clear() # don't pass along exceptions to the other coroutine try: rval = other.switch(value, exc) if not rval or not other: @@ -315,9 +318,16 @@ def switch(other=None, value=None, exc=None): except: res, exc = None, sys.exc_info() _greenlet_context_call('swap_in') + # *NOTE: we don't restore exc_info, so don't switch inside an + # exception handler and then call sys.exc_info() or use bare + # raise. Instead, explicitly save off the exception before + # switching. We need an extension that allows us to restore the + # exception state at this point because vanilla Python doesn't + # allow that. 
if isinstance(exc, tuple): typ, exc, tb = exc raise typ, exc, tb elif exc is not None: raise exc + return res From 04977011f43bd7442ac5bfa64858e66e94df4701 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Thu, 15 Nov 2007 18:58:19 -0500 Subject: [PATCH 30/79] [svn r48] svn merge -r15:47 https://svn.secondlife.com/svn/eventlet/branches/beta-1 into https://svn.secondlife.com/svn/eventlet/trunk --- eventlet/httpc.py | 39 ++++++++++++++++++++++----------------- eventlet/httpd.py | 6 ++++++ eventlet/wrappedfd.py | 31 ++++++++++++++++++++++++++----- 3 files changed, 54 insertions(+), 22 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 063c0b4..1d4bbf8 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -74,37 +74,31 @@ def host_and_port_from_url(url): return host, port -def better_putrequest(self, method, url, skip_host=0): +def better_putrequest(self, method, url, skip_host=0, skip_accept_encoding=0): self.method = method self.path = url - self.old_putrequest(method, url, skip_host) + try: + # Python 2.4 and above + self.old_putrequest(method, url, skip_host, skip_accept_encoding) + except TypeError: + # Python 2.3 and below + self.old_putrequest(method, url, skip_host) class HttpClient(httplib.HTTPConnection): - """A subclass of httplib.HTTPConnection which works around a bug - in the interaction between eventlet sockets and httplib. httplib relies - on gc to close the socket, causing the socket to be closed too early. - - This is an awful hack and the bug should be fixed properly ASAP. + """A subclass of httplib.HTTPConnection that provides a better + putrequest that records the method and path on the request object. """ def __init__(self, host, port=None, strict=None): _old_HTTPConnection.__init__(self, host, port, strict) - def close(self): - pass - old_putrequest = httplib.HTTPConnection.putrequest putrequest = better_putrequest class HttpsClient(httplib.HTTPSConnection): - """A subclass of httplib.HTTPSConnection which works around a bug - in the interaction between eventlet sockets and httplib. httplib relies - on gc to close the socket, causing the socket to be closed too early. - - This is an awful hack and the bug should be fixed properly ASAP. + """A subclass of httplib.HTTPSConnection that provides a better + putrequest that records the method and path on the request object. """ - def close(self): - pass old_putrequest = httplib.HTTPSConnection.putrequest putrequest = better_putrequest @@ -184,6 +178,11 @@ class FileScheme(object): self.method, self.host, self.port, self.path, self.status, self.reason, '') + def close(self): + """We're challenged here, and read the whole file rather than + integrating with this lib. 
file object already out of scope at this + point""" + pass class _Params(object): def __init__(self, url, method, body='', headers=None, dumper=None, @@ -471,6 +470,8 @@ class HttpSuite(object): if params.loader is not None: try: body = params.loader(body) + except KeyboardInterrupt: + raise except Exception, e: raise UnparseableResponse(params.loader, body) @@ -488,6 +489,7 @@ class HttpSuite(object): params.headers) params.response = connection.getresponse() params.response_body = params.response.read() + connection.close() self._check_status(params) return params.response, params.response_body @@ -505,6 +507,7 @@ class HttpSuite(object): def get_(self, url, headers=None, use_proxy=False, ok=None, aux=None): if headers is None: headers = {} + headers['accept'] = self.fallback_content_type+';q=1,*/*;q=0' return self.request_(_Params(url, 'GET', headers=headers, loader=make_safe_loader(self.loader), use_proxy=use_proxy, ok=ok, aux=aux)) @@ -521,6 +524,7 @@ class HttpSuite(object): headers['content-type'] = self.fallback_content_type else: headers['content-type'] = content_type + headers['accept'] = headers['content-type']+';q=1,*/*;q=0' return self.request_(_Params(url, 'PUT', body=data, headers=headers, ok=ok, aux=aux)) @@ -542,6 +546,7 @@ class HttpSuite(object): headers['content-type'] = self.fallback_content_type else: headers['content-type'] = content_type + headers['accept'] = headers['content-type']+';q=1,*/*;q=0' return self.request_(_Params(url, 'POST', body=data, headers=headers, dumper=self.dumper, loader=self.loader, ok=ok, aux=aux)) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index 56ecee2..73b4388 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -329,6 +329,11 @@ class Request(object): def not_found(self): self.error(404, 'Not Found\n', log_traceback=False) + def raw_body(self): + if not hasattr(self, '_cached_body'): + self.read_body() + return self._cached_body + def read_body(self): if not hasattr(self, '_cached_parsed_body'): if not hasattr(self, '_cached_body'): @@ -504,6 +509,7 @@ def server(sock, site, log=None, max_size=512): new_sock, address = sock.accept() proto = HttpProtocol(new_sock, address, serv) pool.execute_async(proto.handle) + api.sleep(0) # sleep to allow other coros to run except KeyboardInterrupt: api.get_hub().remove_descriptor(sock.fileno()) serv.log.write("httpd exiting\n") diff --git a/eventlet/wrappedfd.py b/eventlet/wrappedfd.py index 32b0126..74811a2 100644 --- a/eventlet/wrappedfd.py +++ b/eventlet/wrappedfd.py @@ -64,18 +64,35 @@ def higher_order_send(send_func): +class RefCount(object): + def __init__(self): + self._count = 1 + + def increment(self): + self._count += 1 + + def decrement(self): + self._count -= 1 + assert self._count >= 0 + + def is_referenced(self): + return self._count > 0 + class wrapped_fd(object): newlines = '\r\n' mode = 'wb+' is_secure = False - def __init__(self, fd): + def __init__(self, fd, refcount = None): self._closed = False self.fd = fd self._fileno = fd.fileno() self.recvbuffer = '' self.recvcount = 0 - self.sendcount = 0 + self.sendcount = 0 + self._refcount = refcount + if refcount is None: + self._refcount = RefCount() def getpeername(self, *args, **kw): fn = self.getpeername = self.fd.getpeername @@ -116,6 +133,9 @@ class wrapped_fd(object): def close(self, *args, **kw): if self._closed: return + self._refcount.decrement() + if self._refcount.is_referenced(): + return self._closed = True fn = self.close = self.fd.close try: @@ -222,8 +242,8 @@ class wrapped_fd(object): self.write(line) def 
read(self, size=None): - if size is not None and not isinstance(size, int): - raise TypeError + if size is not None and not isinstance(size, (int, long)): + raise TypeError('Expecting an int or long for size, got %s: %s' % (type(size), repr(size))) buf, self.recvbuffer = self.recvbuffer, '' lst = [buf] if size is None: @@ -250,7 +270,8 @@ class wrapped_fd(object): return ''.join(lst) def makefile(self, *args, **kw): - return type(self)(self.fd) + self._refcount.increment() + return type(self)(self.fd, refcount = self._refcount) class wrapped_file(wrapped_fd): From ba89758dcfea0c72609524a6362bbbb78a3d7183 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Fri, 16 Nov 2007 10:47:03 -0500 Subject: [PATCH 31/79] [svn r49] Explicit deletion of traceback object to help stave off cyclic object references. --- eventlet/greenlib.py | 1 + 1 file changed, 1 insertion(+) diff --git a/eventlet/greenlib.py b/eventlet/greenlib.py index d9367b7..58dac7a 100644 --- a/eventlet/greenlib.py +++ b/eventlet/greenlib.py @@ -309,6 +309,7 @@ def switch(other=None, value=None, exc=None): running_exc = sys.exc_info() if running_exc[0] != None: # see if we're in the middle of an exception handler sys.exc_clear() # don't pass along exceptions to the other coroutine + del running_exc # tracebacks can create cyclic object references try: rval = other.switch(value, exc) if not rval or not other: From 04c2304173c9f46ab225f889f3e5555dfdc91cc4 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Sun, 18 Nov 2007 17:09:20 -0500 Subject: [PATCH 32/79] [svn r50] Such a silly fix, but it resolves the delete bug. I guess omitting the self qualifier caused delete_ to close over the global httpc.request_ instead of using its local (overridden) delete. Goddamn is that subtle. --- eventlet/httpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 1d4bbf8..a6e7acc 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -532,7 +532,7 @@ class HttpSuite(object): return self.put_(*args, **kwargs)[-1] def delete_(self, url, ok=None, aux=None): - return request_(_Params(url, 'DELETE', ok=ok, aux=aux)) + return self.request_(_Params(url, 'DELETE', ok=ok, aux=aux)) def delete(self, *args, **kwargs): return self.delete_(*args, **kwargs)[-1] From 4401a27f93a213c6fdb70621b66770a0188745df Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Wed, 21 Nov 2007 12:47:30 -0500 Subject: [PATCH 33/79] [svn r51] Modified setup.py to generate eggs (which work as far as I can tell), bumped the version number to 0.2. 0.1 is the beta-1 branch, so when we branch again it will be from trunk to beta-2. --- setup.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 8e03fe2..3a73e41 100644 --- a/setup.py +++ b/setup.py @@ -1,13 +1,28 @@ #!/usr/bin/env python -from distutils.core import setup +from setuptools import setup setup( name='eventlet', - version='0.1', + version='0.2', description='Coroutine-based networking library', author='Linden Lab', author_email='sldev@lists.secondlife.com', url='http://wiki.secondlife.com/wiki/Eventlet', - packages=['eventlet']) + packages=['eventlet'], + install_requires=['greenlet'], + long_description=""" + Eventlet is a networking library written in Python. 
It achieves + high scalability by using non-blocking io while at the same time + retaining high programmer usability by using coroutines to make + the non-blocking io operations appear blocking at the source code + level.""", + classifiers=[ + "License :: OSI Approved :: MIT License", + "Programming Language :: Python", + "Topic :: Internet", + "Topic :: Software Development :: Libraries :: Python Modules", + "Intended Audience :: Developers", + "Development Status :: 4 - Beta"] + ) From 6cf2ed8ab6d1e97ed3d720feedd843c6c321a3bb Mon Sep 17 00:00:00 2001 From: "donovan.linden" Date: Fri, 30 Nov 2007 11:15:17 -0500 Subject: [PATCH 34/79] [svn r53] From what I can tell, params.dumper doesn't work at all, so use self.dumper and loader instead. Add safe_load to jsonhttp which allows a response body to be an empty string without causing an exception. --- eventlet/httpc.py | 10 +++++----- eventlet/jsonhttp.py | 9 ++++++++- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index a6e7acc..7b19c2e 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -457,8 +457,8 @@ class HttpSuite(object): params.orig_body = params.body if params.method in ('PUT', 'POST'): - if params.dumper is not None: - params.body = params.dumper(params.body) + if self.dumper is not None: + params.body = self.dumper(params.body) # don't set content-length header because httplib does it # for us in _send_request else: @@ -467,13 +467,13 @@ class HttpSuite(object): params.response, params.response_body = self._get_response_body(params) response, body = params.response, params.response_body - if params.loader is not None: + if self.loader is not None: try: - body = params.loader(body) + body = self.loader(body) except KeyboardInterrupt: raise except Exception, e: - raise UnparseableResponse(params.loader, body) + raise UnparseableResponse(self.loader, body) return response.status, response.msg, body diff --git a/eventlet/jsonhttp.py b/eventlet/jsonhttp.py index 4228a39..7c84af5 100644 --- a/eventlet/jsonhttp.py +++ b/eventlet/jsonhttp.py @@ -26,7 +26,14 @@ from eventlet import httpc import simplejson -suite = httpc.HttpSuite(simplejson.dumps, simplejson.loads, 'application/json') + +def safe_load(what): + if not what: + return None + return simplejson.loads(what) + + +suite = httpc.HttpSuite(simplejson.dumps, safe_load, 'application/json') head, get, put, delete, post = ( suite.head, suite.get, suite.put, suite.delete, suite.post) head_, get_, put_, delete_, post_ = ( From 0e991c95275eaa360485bba0b7db4859ea0fe7fc Mon Sep 17 00:00:00 2001 From: "seeping.blister" Date: Fri, 30 Nov 2007 20:21:30 -0500 Subject: [PATCH 35/79] [svn r54] 503 now maps to ServiceUnavailable, a Retriable exception. 
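A minimal sketch of how a caller might use this; the helper and its back-off policy are made up for this note, not part of the patch:

    from eventlet import api, httpc

    def get_with_retry(url, attempts=3):
        for i in range(attempts):
            try:
                return httpc.get(url)
            except httpc.ServiceUnavailable, err:
                if i == attempts - 1:
                    raise
                # back off before re-issuing the same request
                api.sleep(2 ** i)
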
--- eventlet/httpc.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 7b19c2e..cdc0f47 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -340,6 +340,10 @@ class Gone(ConnectionError): """ 410 Gone """ pass +class ServiceUnavailable(httpc.Retriable): + """ 503 Service Unavailable """ + def url(self): + return self.params._delegate.url class InternalServerError(ConnectionError): """ 500 Internal Server Error """ @@ -377,6 +381,7 @@ status_to_error_map = { 404: NotFound, 410: Gone, 500: InternalServerError, + 503: ServiceUnavailable, } scheme_to_factory_map = { From 49070dd4c23bca4012e45e1f90a0b09c722be912 Mon Sep 17 00:00:00 2001 From: "seeping.blister" Date: Fri, 30 Nov 2007 20:27:14 -0500 Subject: [PATCH 36/79] [svn r55] Oy: typo in that last change --- eventlet/httpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index cdc0f47..3567830 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -340,7 +340,7 @@ class Gone(ConnectionError): """ 410 Gone """ pass -class ServiceUnavailable(httpc.Retriable): +class ServiceUnavailable(Retriable): """ 503 Service Unavailable """ def url(self): return self.params._delegate.url From 26c86ee828a70a8db2f2b3c8a6aec8795af442c0 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Wed, 5 Dec 2007 21:02:58 -0500 Subject: [PATCH 37/79] [svn r59] Moved saranwrap to eventlet, moved mysql_pool over and abstracted it a bit to db_pool. It *should* be compatible with other DB-API implementations but no testing has been done. --- eventlet/db_pool.py | 94 ++++++ eventlet/db_pool_test.py | 271 ++++++++++++++++ eventlet/saranwrap.py | 648 +++++++++++++++++++++++++++++++++++++ eventlet/saranwrap_test.py | 272 ++++++++++++++++ 4 files changed, 1285 insertions(+) create mode 100644 eventlet/db_pool.py create mode 100644 eventlet/db_pool_test.py create mode 100644 eventlet/saranwrap.py create mode 100644 eventlet/saranwrap_test.py diff --git a/eventlet/db_pool.py b/eventlet/db_pool.py new file mode 100644 index 0000000..6b37e1c --- /dev/null +++ b/eventlet/db_pool.py @@ -0,0 +1,94 @@ +"""\ +@file db_pool.py +@brief Uses saranwrap to implement a pool of nonblocking database connections to a db server. + +Copyright (c) 2007, Linden Research, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +import os + +from eventlet.pools import Pool +from eventlet.processes import DeadProcess +from eventlet import saranwrap + +class DatabaseConnector(object): + """\ +@brief This is an object which will maintain a collection of database +connection pools keyed on host,databasename""" + def __init__(self, module, credentials, min_size = 0, max_size = 4, *args, **kwargs): + """\ + @brief constructor + @param min_size the minimum size of a child pool. + @param max_size the maximum size of a child pool.""" + assert(module) + self._module = module + self._min_size = min_size + self._max_size = max_size + self._args = args + self._kwargs = kwargs + self._credentials = credentials # this is a map of hostname to username/password + self._databases = {} + + def credentials_for(self, host): + if host in self._credentials: + return self._credentials[host] + else: + return self._credentials.get('default', None) + + def get(self, host, dbname): + key = (host, dbname) + if key not in self._databases: + new_kwargs = self._kwargs.copy() + new_kwargs['db'] = dbname + new_kwargs['host'] = host + new_kwargs.update(self.credentials_for(host)) + dbpool = ConnectionPool(self._module, self._min_size, self._max_size, *self._args, **new_kwargs) + self._databases[key] = dbpool + + return self._databases[key] + + +class ConnectionPool(Pool): + """A pool which gives out saranwrapped database connections from a pool + """ + def __init__(self, module, min_size = 0, max_size = 4, *args, **kwargs): + assert(module) + self._module = module + self._args = args + self._kwargs = kwargs + Pool.__init__(self, min_size, max_size) + + def create(self): + return saranwrap.wrap(self._module).connect(*self._args, **self._kwargs) + + def put(self, conn): + # rollback any uncommitted changes, so that the next process + # has a clean slate. This also pokes the process to see if + # it's dead or None + try: + conn.rollback() + except (AttributeError, DeadProcess), e: + conn = self.create() + # TODO figure out if we're still connected to the database + if conn is not None: + Pool.put(self, conn) + else: + self.current_size -= 1 diff --git a/eventlet/db_pool_test.py b/eventlet/db_pool_test.py new file mode 100644 index 0000000..33dea27 --- /dev/null +++ b/eventlet/db_pool_test.py @@ -0,0 +1,271 @@ +#!/usr/bin/python +# @file test_mysql_pool.py +# @brief Test cases for mysql_pool +# +# Copyright (c) 2007, Linden Research, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+ +import os.path + +from eventlet import api, coros +from eventlet import db_pool + +import unittest + +class DBTester(object): + def setUp(self): + self.create_db() + self.connection = None + connection = self._dbmodule.connect(**self._auth) + cursor = connection.cursor() + cursor.execute("""CREATE TABLE gargleblatz + ( + a INTEGER + ) ENGINE = InnoDB;""") + connection.commit() + cursor.close() + + def tearDown(self): + if self.connection is not None: + self.connection.close() + self.drop_db() + + def set_up_test_table(self, connection = None): + if connection is None: + if self.connection is None: + self.connection = self._dbmodule.connect(**self._auth) + connection = self.connection + + cursor = connection.cursor() + cursor.execute("""CREATE TEMPORARY TABLE test_table + ( + row_id INTEGER PRIMARY KEY AUTO_INCREMENT, + value_int INTEGER, + value_float FLOAT, + value_string VARCHAR(200), + value_uuid CHAR(36), + value_binary BLOB, + value_binary_string VARCHAR(200) BINARY, + value_enum ENUM('Y','N'), + created TIMESTAMP + ) ENGINE = InnoDB;""") + connection.commit() + cursor.close() + +class TestDBConnectionPool(DBTester): + def setUp(self): + super(TestDBConnectionPool, self).setUp() + self.pool = self.create_pool() + self.connection = self.pool.get() + + def tearDown(self): + self.pool.put(self.connection) + super(TestDBConnectionPool, self).tearDown() + + def create_pool(self, max_items = 1): + return db_pool.ConnectionPool(self._dbmodule, 0, max_items, **self._auth) + + def assert_cursor_works(self, cursor): + cursor.execute("show full processlist") + rows = cursor.fetchall() + self.assert_(rows) + + def test_connecting(self): + self.assert_(self.connection is not None) + + def test_create_cursor(self): + cursor = self.connection.cursor() + cursor.close() + + def test_run_query(self): + cursor = self.connection.cursor() + self.assert_cursor_works(cursor) + cursor.close() + + def test_run_bad_query(self): + cursor = self.connection.cursor() + try: + cursor.execute("garbage blah blah") + self.assert_(False) + except AssertionError: + raise + except Exception, e: + pass + cursor.close() + + def test_put_none(self): + # the pool is of size 1, and its only connection is out + self.assert_(self.pool.free() == 0) + self.pool.put(None) + # ha ha we fooled it into thinking that we had a dead process + self.assert_(self.pool.free() == 1) + conn2 = self.pool.get() + self.assert_(conn2 is not None) + self.assert_(conn2.cursor) + del conn2 + + def fill_test_table(self, conn): + curs = conn.cursor() + for i in range(1000): + curs.execute('insert into test_table (value_int) values (%s)' % i) + conn.commit() + + def test_returns_immediately(self): + self.pool = self.create_pool() + conn = self.pool.get() + self.set_up_test_table(conn) + self.fill_test_table(conn) + curs = conn.cursor() + results = [] + SHORT_QUERY = "select * from test_table" + evt = coros.event() + def a_query(): + self.assert_cursor_works(curs) + curs.execute(SHORT_QUERY) + results.append(2) + evt.send() + evt2 = coros.event() + api.spawn(a_query) + results.append(1) + self.assertEqual([1], results) + evt.wait() + self.assertEqual([1, 2], results) + + def test_connection_is_clean_after_put(self): + self.pool = self.create_pool() + conn = self.pool.get() + self.set_up_test_table(conn) + curs = conn.cursor() + for i in range(10): + curs.execute('insert into test_table (value_int) values (%s)' % i) + # do not commit :-) + self.pool.put(conn) + del conn + conn2 = self.pool.get() + curs2 = conn2.cursor() + for i in range(10): + 
curs2.execute('insert into test_table (value_int) values (%s)' % i) + conn2.commit() + rows = curs2.execute("select * from test_table") + # we should have only inserted them once + self.assertEqual(10, rows) + + def test_visibility_from_other_connections(self): + # *FIX: use some non-indra-specific table for testing (can't use a temp table) + self.pool = self.create_pool(3) + conn = self.pool.get() + conn2 = self.pool.get() + curs = conn.cursor() + try: + curs2 = conn2.cursor() + rows2 = curs2.execute("insert into gargleblatz (a) values (%s)" % (314159)) + self.assertEqual(rows2, 1) + conn2.commit() + selection_query = "select * from gargleblatz" + rows2 = curs2.execute(selection_query) + self.assertEqual(rows2, 1) + del curs2 + del conn2 + # create a new connection, it should see the addition + conn3 = self.pool.get() + curs3 = conn3.cursor() + rows3 = curs3.execute(selection_query) + self.assertEqual(rows3, 1) + # now, does the already-open connection see it? + rows = curs.execute(selection_query) + self.assertEqual(rows, 1) + finally: + # clean up my litter + curs.execute("delete from gargleblatz where a=314159") + conn.commit() + + + def test_two_simultaneous_connections(self): + self.pool = self.create_pool(2) + conn = self.pool.get() + self.set_up_test_table(conn) + self.fill_test_table(conn) + curs = conn.cursor() + conn2 = self.pool.get() + self.set_up_test_table(conn2) + self.fill_test_table(conn2) + curs2 = conn2.cursor() + results = [] + LONG_QUERY = "select * from test_table" + SHORT_QUERY = "select * from test_table where row_id <= 20" + + evt = coros.event() + def long_running_query(): + self.assert_cursor_works(curs) + curs.execute(LONG_QUERY) + results.append(1) + evt.send() + evt2 = coros.event() + def short_running_query(): + self.assert_cursor_works(curs2) + curs2.execute(SHORT_QUERY) + results.append(2) + evt2.send() + + api.spawn(long_running_query) + api.spawn(short_running_query) + evt.wait() + evt2.wait() + #print "results %s" % results + results.sort() + self.assertEqual([1, 2], results) + +import MySQLdb + +class TestMysqlConnectionPool(TestDBConnectionPool, unittest.TestCase): + def setUp(self): + try: + import simplejson + import os.path + auth_utf8 = simplejson.load(open(os.path.join(os.path.dirname(__file__), 'auth.json'))) + # have to convert unicode objects to str objects because mysqldb is dum + self._auth = dict([(str(k), str(v)) + for k, v in auth_utf8.items()]) + self._dbmodule = MySQLdb + except (IOError, ImportError), e: + self._auth = {'host': 'localhost','user': 'root','passwd': '','db': 'persist0'} + super(TestMysqlConnectionPool, self).setUp() + + def create_db(self): + auth = self._auth.copy() + try: + self.drop_db() + except Exception: + pass + dbname = auth.pop('db') + db = MySQLdb.connect(**auth).cursor() + db.execute("create database "+dbname) + db.close() + del db + + def drop_db(self): + db = MySQLdb.connect(**self._auth).cursor() + db.execute("drop database "+self._auth['db']) + db.close() + del db + + +if __name__ == '__main__': + unittest.main() diff --git a/eventlet/saranwrap.py b/eventlet/saranwrap.py new file mode 100644 index 0000000..b899d8a --- /dev/null +++ b/eventlet/saranwrap.py @@ -0,0 +1,648 @@ +"""\ +@file saranwrap.py +@author Phoenix +@date 2007-07-13 +@brief A simple, pickle based rpc mechanism which reflects python +objects and callables. + +Copyright (c) 2007, Linden Research, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +This file provides classes and exceptions used for simple python level +remote procedure calls. This is achieved by intercepting the basic +getattr and setattr calls in a client proxy, which commnicates those +down to the server which will dispatch them to objects in it's process +space. + +The basic protocol to get and set attributes is for the client proxy +to issue the command: + +getattr $id $name +setattr $id $name $value + +getitem $id $item +setitem $id $item $value +eq $id $rhs +del $id + +When the get returns a callable, the client proxy will provide a +callable proxy which will invoke a remote procedure call. The command +issued from the callable proxy to server is: + +call $id $name $args $kwargs + +If the client supplies an id of None, then the get/set/call is applied +to the object(s) exported from the server. + +The server will parse the get/set/call, take the action indicated, and +return back to the caller one of: + +value $val +callable +object $id +exception $excp + +To handle object expiration, the proxy will instruct the rpc server to +discard objects which are no longer in use. This is handled by +catching proxy deletion and sending the command: + +del $id + +The server will handle this by removing clearing it's own internal +references. This does not mean that the object will necessarily be +cleaned from the server, but no artificial references will remain +after successfully completing. On completion, the server will return +one of: + +value None +exception $excp + +The server also accepts a special command for debugging purposes: + +status + +Which will be intercepted by the server to write back: + +status {...} + +The wire protocol is to pickle the Request class in this file. The +request class is basically an action and a map of parameters' +""" + +import os +import cPickle +import struct +import sys + +try: + set = set + frozenset = frozenset +except NameError: + from sets import Set as set, ImmutableSet as frozenset + +from eventlet.processes import Process +from eventlet import api + +# +# debugging hooks +# +_g_debug_mode = False +if _g_debug_mode: + import traceback + +def pythonpath_sync(): + """ +@brief apply the current sys.path to the environment variable PYTHONPATH, so that child processes have the same paths as the caller does. 
+""" + pypath = os.pathsep.join(sys.path) + os.environ['PYTHONPATH'] = pypath + +def wrap(obj, dead_callback = None): + """ +@brief wrap in object in another process through a saranwrap proxy +@param object The object to wrap. +@param dead_callback A callable to invoke if the process exits.""" + + if type(obj).__name__ == 'module': + return wrap_module(obj.__name__, dead_callback) + pythonpath_sync() + p = Process('python', [__file__, '--child'], dead_callback) + prox = Proxy(p, p) + prox.obj = obj + return prox.obj + +def wrap_module(fqname, dead_callback = None): + """ +@brief wrap a module in another process through a saranwrap proxy +@param fqname The fully qualified name of the module. +@param dead_callback A callable to invoke if the process exits.""" + pythonpath_sync() + global _g_debug_mode + if _g_debug_mode: + p = Process('python', [__file__, '--module', fqname, '--logfile', '/tmp/saranwrap.log'], dead_callback) + else: + p = Process('python', [__file__, '--module', fqname,], dead_callback) + prox = Proxy(p, p) + return prox + +def status(proxy): + """ +@brief get the status from the server through a proxy +@param proxy a saranwrap.Proxy object connected to a server.""" + _write_request(Request('status', {}), proxy.__local_dict['_out']) + return _read_response(None, None, proxy.__local_dict['_in'], proxy.__local_dict['_out'], None) + +class BadResponse(Exception): + """"This exception is raised by an saranwrap client when it could + parse but cannot understand the response from the server.""" + pass + +class BadRequest(Exception): + """"This exception is raised by a saranwrap server when it could parse + but cannot understand the response from the server.""" + pass + +class UnrecoverableError(Exception): + pass + +class Request(object): + "@brief A wrapper class for proxy requests to the server." + def __init__(self, action, param): + self._action = action + self._param = param + def __str__(self): + return "Request `"+self._action+"` "+str(self._param) + def __getitem__(self, name): + return self._param[name] + def action(self): + return self._action + +def _read_lp_hunk(stream): + len_bytes = stream.read(4) + length = struct.unpack('I', len_bytes)[0] + body = stream.read(length) + return body + +def _read_response(id, attribute, input, output, dead_list): + """@brief local helper method to read respones from the rpc server.""" + try: + str = _read_lp_hunk(input) + _prnt(`str`) + response = cPickle.loads(str) + except AttributeError, e: + raise UnrecoverableError(e) + _prnt("response: %s" % response) + if response[0] == 'value': + return response[1] + elif response[0] == 'callable': + return CallableProxy(id, attribute, input, output, dead_list) + elif response[0] == 'object': + return ObjectProxy(input, output, response[1], dead_list) + elif response[0] == 'exception': + exp = response[1] + raise exp + else: + raise BadResponse(response[0]) + +def _write_lp_hunk(stream, hunk): + write_length = struct.pack('I', len(hunk)) + stream.write(write_length + hunk) + if hasattr(stream, 'flush'): + stream.flush() + +def _write_request(param, output): + _prnt("request: %s" % param) + str = cPickle.dumps(param) + _write_lp_hunk(output, str) + +def _is_local(attribute): + "Return true if the attribute should be handled locally" +# return attribute in ('_in', '_out', '_id', '__getattribute__', '__setattr__', '__dict__') + # good enough for now. 
:) + if '__local_dict' in attribute: + return True + return False + +def _prnt(message): + global _g_debug_mode + if _g_debug_mode: + print message + +_g_logfile = None +def _log(message): + global _g_logfile + if _g_logfile: + _g_logfile.write(str(os.getpid()) + ' ' + message) + _g_logfile.write('\n') + _g_logfile.flush() + +def _unmunge_attr_name(name): + """ Sometimes attribute names come in with classname prepended, not sure why. + This function removes said classname, because we're huge hackers and we didn't + find out what the true right thing to do is. *FIX: find out. """ + if(name.startswith('_Proxy')): + name = name[len('_Proxy'):] + if(name.startswith('_ObjectProxy')): + name = name[len('_ObjectProxy'):] + return name + + +class Proxy(object): + """\ +@class Proxy +@brief This class wraps a remote python process, presumably available +in an instance of an Server. + +This is the class you will typically use as a client to a child +process. Simply instantiate one around a file-like interface and start +calling methods on the thing that is exported. The dir() builtin is +not supported, so you have to know what has been exported. +""" + def __init__(self, input, output, dead_list = None): + """\ +@param input a file-like object which supports read(). +@param output a file-like object which supports write() and flush(). +@param id an identifier for the remote object. humans do not provide this. +""" + # default dead_list inside the function because all objects in method + # argument lists are init-ed only once globally + if dead_list is None: + dead_list = set() + #_prnt("Proxy::__init__") + self.__local_dict = dict( + _in = input, + _out = output, + _dead_list = dead_list, + _id = None) + + def __getattribute__(self, attribute): + #_prnt("Proxy::__getattr__: %s" % attribute) + if _is_local(attribute): + # call base class getattribute so we actually get the local variable + attribute = _unmunge_attr_name(attribute) + return super(Proxy, self).__getattribute__(attribute) + else: + my_in = self.__local_dict['_in'] + my_out = self.__local_dict['_out'] + my_id = self.__local_dict['_id'] + + _dead_list = self.__local_dict['_dead_list'] + for dead_object in _dead_list.copy(): + request = Request('del', {'id':dead_object}) + _write_request(request, my_out) + response = _read_response(my_id, attribute, my_in, my_out, _dead_list) + _dead_list.remove(dead_object) + + # Pass all public attributes across to find out if it is + # callable or a simple attribute. 
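+            # The server replies with one of 'value', 'callable', 'object' or
+            # 'exception'; _read_response turns those into a plain value, a
+            # CallableProxy, a new ObjectProxy, or a raised exception.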
+ request = Request('getattr', {'id':my_id, 'attribute':attribute}) + _write_request(request, my_out) + return _read_response(my_id, attribute, my_in, my_out, _dead_list) + + def __setattr__(self, attribute, value): + #_prnt("Proxy::__setattr__: %s" % attribute) + if _is_local(attribute): + # It must be local to this actual object, so we have to apply + # it to the dict in a roundabout way + attribute = _unmunge_attr_name(attribute) + super(Proxy, self).__getattribute__('__dict__')[attribute]=value + else: + my_in = self.__local_dict['_in'] + my_out = self.__local_dict['_out'] + my_id = self.__local_dict['_id'] + _dead_list = self.__local_dict['_dead_list'] + # Pass the set attribute across + request = Request('setattr', {'id':my_id, 'attribute':attribute, 'value':value}) + _write_request(request, my_out) + return _read_response(my_id, attribute, my_in, my_out, _dead_list) + +class ObjectProxy(Proxy): + """\ +@class ObjectProxy +@brief This class wraps a remote object in the Server + +This class will be created during normal operation, and users should +not need to deal with this class directly.""" + + def __init__(self, input, output, id, dead_list): + """\ +@param input a file-like object which supports read(). +@param output a file-like object which supports write() and flush(). +@param id an identifier for the remote object. humans do not provide this. +""" + Proxy.__init__(self, input, output, dead_list) + self.__local_dict['_id'] = id + #_prnt("ObjectProxy::__init__ %s" % self._id) + + def __del__(self): + my_id = self.__local_dict['_id'] + _prnt("ObjectProxy::__del__ %s" % my_id) + self.__local_dict['_dead_list'].add(my_id) + + def __getitem__(self, key): + my_in = self.__local_dict['_in'] + my_out = self.__local_dict['_out'] + my_id = self.__local_dict['_id'] + _dead_list = self.__local_dict['_dead_list'] + request = Request('getitem', {'id':my_id, 'key':key}) + _write_request(request, my_out) + return _read_response(my_id, key, my_in, my_out, _dead_list) + + def __setitem__(self, key, value): + my_in = self.__local_dict['_in'] + my_out = self.__local_dict['_out'] + my_id = self.__local_dict['_id'] + _dead_list = self.__local_dict['_dead_list'] + request = Request('setitem', {'id':my_id, 'key':key, 'value':value}) + _write_request(request, my_out) + return _read_response(my_id, key, my_in, my_out, _dead_list) + + def __eq__(self, rhs): + my_in = self.__local_dict['_in'] + my_out = self.__local_dict['_out'] + my_id = self.__local_dict['_id'] + _dead_list = self.__local_dict['_dead_list'] + request = Request('eq', {'id':my_id, 'rhs':rhs.__local_dict['_id']}) + _write_request(request, my_out) + return _read_response(my_id, None, my_in, my_out, _dead_list) + + def __repr__(self): + # apparently repr(obj) skips the whole getattribute thing and just calls __repr__ + # directly. Therefore we just pass it through the normal call pipeline, and + # tack on a little header so that you can tell it's an object proxy. + val = self.__repr__() + return "saran:%s" % val + + def __str__(self): + # see description for __repr__, because str(obj) works the same. We don't + # tack anything on to the return value here because str values are used as data. + return self.__str__() + + def __len__(self): + # see description for __repr__, len(obj) is the same. Unfortunately, __len__ is also + # used when determining whether an object is boolean or not, e.g. 
if proxied_object: + return self.__len__() + +def proxied_type(self): + if type(self) is not ObjectProxy: + return type(self) + + my_in = self.__local_dict['_in'] + my_out = self.__local_dict['_out'] + my_id = self.__local_dict['_id'] + request = Request('type', {'id':my_id}) + _write_request(request, my_out) + # dead list can be none because we know the result will always be + # a value and not an ObjectProxy itself + return _read_response(my_id, None, my_in, my_out, None) + +class CallableProxy(object): + """\ +@class CallableProxy +@brief This class wraps a remote function in the Server + +This class will be created by an Proxy during normal operation, +and users should not need to deal with this class directly.""" + + def __init__(self, object_id, name, input, output, dead_list): + #_prnt("CallableProxy::__init__: %s, %s" % (object_id, name)) + self._object_id = object_id + self._name = name + self._in = input + self._out = output + self._dead_list = dead_list + + def __call__(self, *args, **kwargs): + #_prnt("CallableProxy::__call__: %s, %s" % (args, kwargs)) + + # Pass the call across. We never build a callable without + # having already checked if the method starts with '_' so we + # can safely pass this one to the remote object. + #_prnt("calling %s %s" % (self._object_id, self._name) + request = Request('call', {'id':self._object_id, 'name':self._name, 'args':args, 'kwargs':kwargs}) + _write_request(request, self._out) + return _read_response(self._object_id, self._name, self._in, self._out, self._dead_list) + +class Server(object): + def __init__(self, input, output, export): + """\ +@param input a file-like object which supports read(). +@param output a file-like object which supports write() and flush(). +@param export an object, function, or map which is exported to clients +when the id is None.""" + #_log("Server::__init__") + self._in = input + self._out = output + self._export = export + self._next_id = 1 + self._objects = {} + + def handle_status(self, object, req): + return { + 'object_count':len(self._objects), + 'next_id':self._next_id, + 'pid':os.getpid()} + + def handle_getattr(self, object, req): + try: + return getattr(object, req['attribute']) + except AttributeError, e: + if hasattr(object, "__getitem__"): + return object[req['attribute']] + else: + raise e + #_log('getattr: %s' % str(response)) + + def handle_setattr(self, object, req): + try: + return setattr(object, req['attribute'], req['value']) + except AttributeError, e: + if hasattr(object, "__setitem__"): + return object.__setitem__(req['attribute'], req['value']) + else: + raise e + + def handle_getitem(self, object, req): + return object[req['key']] + + def handle_setitem(self, object, req): + object[req['key']] = req['value'] + return None # *TODO figure out what the actual return value of __setitem__ should be + + def handle_eq(self, object, req): + #_log("__eq__ %s %s" % (object, req)) + rhs = None + try: + rhs = self._objects[req['rhs']] + except KeyError, e: + return False + return (object == rhs) + + def handle_call(self, object, req): + #_log("calling %s " % (req['name'])) + try: + fn = getattr(object, req['name']) + except AttributeError, e: + if hasattr(object, "__setitem__"): + fn = object[req['name']] + else: + raise e + + return fn(*req['args'],**req['kwargs']) + + def handle_del(self, object, req): + id = req['id'] + _log("del %s from %s" % (id, self._objects)) + + # *TODO what does __del__ actually return? 
+ del self._objects[id] + return None + + def handle_type(self, object, req): + return type(object) + + def loop(self): + """@brief Loop forever and respond to all requests.""" + _log("Server::loop") + while True: + try: + try: + str = _read_lp_hunk(self._in) + except EOFError: + sys.exit(0) # normal exit + request = cPickle.loads(str) + _log("request: %s (%s)" % (request, self._objects)) + req = request + id = None + object = None + try: + id = req['id'] + if id: + id = int(id) + object = self._objects[id] + #_log("id, object: %d %s" % (id, object)) + except Exception, e: + #_log("Exception %s" % str(e)) + pass + if object is None or id is None: + id = None + object = self._export + #_log("found object %s" % str(object)) + + # Handle the request via a method with a special name on the server + handler_name = 'handle_%s' % request.action() + + try: + handler = getattr(self, handler_name) + except AttributeError: + raise BadRequest, request.action() + + response = handler(object, request) + + # figure out what to do with the response, and respond + # apprpriately. + if request.action() in ['status', 'type']: + # have to handle these specially since we want to + # pickle up the actual value and not return a proxy + self.respond(['value', response]) + elif callable(response): + #_log("callable %s" % response) + self.respond(['callable']) + elif self.is_value(response): + self.respond(['value', response]) + else: + self._objects[self._next_id] = response + #_log("objects: %s" % str(self._objects)) + self.respond(['object', self._next_id]) + self._next_id += 1 + except SystemExit, e: + raise e + except Exception, e: + self.write_exception(e) + except: + self.write_exception(sys.exc_info()[0]) + + def is_value(self, value): + """\ +@brief Test if value should be serialized as a simple dataset. +@param value The value to test. +@return Returns true if value is a simple serializeable set of data. 
+""" + return type(value) in (str,unicode,int,float,long,bool,type(None)) + + def respond(self, body): + _log("responding with: %s" % body) + #_log("objects: %s" % self._objects) + s = cPickle.dumps(body) + _log(`s`) + str = _write_lp_hunk(self._out, s) + + def write_exception(self, e): + """@brief Helper method to respond with an exception.""" + #_log("exception: %s" % sys.exc_info()[0]) + # TODO: serialize traceback using generalization of code from mulib.htmlexception + self.respond(['exception', e]) + global _g_debug_mode + if _g_debug_mode: + _log("traceback: %s" % traceback.format_tb(sys.exc_info()[2])) + + +# test function used for testing that final except clause +def raise_a_weird_error(): + raise "oh noes you can raise a string" + +# test function used for testing return of unpicklable exceptions +def raise_an_unpicklable_error(): + class Unpicklable(Exception): + pass + raise Unpicklable() + +# test function used for testing return of picklable exceptions +def raise_standard_error(): + raise FloatingPointError() + +# test function to make sure print doesn't break the wrapper +def print_string(str): + print str + +# test function to make sure printing on stdout doesn't break the +# wrapper +def err_string(str): + print >>sys.stderr, str + +def main(): + import optparse + parser = optparse.OptionParser( + usage="usage: %prog [options]", + description="Simple saranwrap.Server wrapper") + parser.add_option( + '-c', '--child', default=False, action='store_true', + help='Wrap an object serialed via setattr.') + parser.add_option( + '-m', '--module', type='string', dest='module', default=None, + help='a module to load and export.') + parser.add_option( + '-l', '--logfile', type='string', dest='logfile', default=None, + help='file to log to.') + options, args = parser.parse_args() + global _g_logfile + if options.logfile: + _g_logfile = open(options.logfile, 'a') + if options.module: + export = api.named(options.module) + server = Server(sys.stdin, sys.stdout, export) + elif options.child: + server = Server(sys.stdin, sys.stdout, {}) + + # *HACK: some modules may emit on stderr, which breaks everything. + class NullSTDOut(object): + def write(a, b): + pass + sys.stderr = NullSTDOut() + sys.stdout = NullSTDOut() + + # Loop until EOF + server.loop() + if _g_logfile: + _g_logfile.close() + + +if __name__ == "__main__": + main() diff --git a/eventlet/saranwrap_test.py b/eventlet/saranwrap_test.py new file mode 100644 index 0000000..1ebd536 --- /dev/null +++ b/eventlet/saranwrap_test.py @@ -0,0 +1,272 @@ +#!/usr/bin/python +# @file test_saranwrap.py +# @brief Test cases for saranwrap. +# +# Copyright (c) 2007, Linden Research, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +from eventlet import saranwrap + +import os +import sys +import tempfile +import unittest +import uuid + +def list_maker(): + return [0,1,2] + +class TestSaranwrap(unittest.TestCase): + def assert_server_exists(self, prox): + self.assert_(saranwrap.status(prox)) + prox.foo = 0 + self.assertEqual(0, prox.foo) + + def test_wrap_tuple(self): + my_tuple = (1, 2) + prox = saranwrap.wrap(my_tuple) + self.assertEqual(prox[0], 1) + self.assertEqual(prox[1], 2) + self.assertEqual(len(my_tuple), 2) + + def test_wrap_string(self): + my_object = "whatever" + prox = saranwrap.wrap(my_object) + self.assertEqual(str(my_object), str(prox)) + self.assertEqual(len(my_object), len(prox)) + self.assertEqual(my_object.join(['a', 'b']), prox.join(['a', 'b'])) + + def test_wrap_uniterable(self): + # here we're treating the exception as just a normal class + prox = saranwrap.wrap(FloatingPointError()) + def index(): + prox[0] + def key(): + prox['a'] + + self.assertRaises(IndexError, index) + self.assertRaises(TypeError, key) + + def test_wrap_dict(self): + my_object = {'a':1} + prox = saranwrap.wrap(my_object) + self.assertEqual('a', prox.keys()[0]) + self.assertEqual(1, prox['a']) + self.assertEqual(str(my_object), str(prox)) + self.assertEqual('saran:' + repr(my_object), repr(prox)) + self.assertEqual('saran:' + `my_object`, `prox`) + + def test_wrap_module_class(self): + prox = saranwrap.wrap(uuid) + self.assertEqual(saranwrap.Proxy, type(prox)) + id = prox.uuid4() + self.assertEqual(id.get_version(), uuid.uuid4().get_version()) + + def test_wrap_eq(self): + prox = saranwrap.wrap(uuid) + id1 = prox.uuid4() + id2 = prox.UUID(str(id1)) + self.assertEqual(id1, id2) + id3 = prox.uuid4() + self.assert_(id1 != id3) + + def test_multiple_wraps(self): + prox1 = saranwrap.wrap(uuid) + prox2 = saranwrap.wrap(uuid) + x1 = prox1.uuid4() + x2 = prox1.uuid4() + del x2 + x3 = prox2.uuid4() + + def test_dict_passthru(self): + prox = saranwrap.wrap(uuid) + x = prox.uuid4() + self.assertEqual(type(x.__dict__), saranwrap.ObjectProxy) + # try it all on one line just for the sake of it + self.assertEqual(type(saranwrap.wrap(uuid).uuid4().__dict__), saranwrap.ObjectProxy) + + def test_is_value(self): + server = saranwrap.Server(None, None, None) + self.assert_(server.is_value(None)) + + def test_wrap_getitem(self): + prox = saranwrap.wrap([0,1,2]) + self.assertEqual(prox[0], 0) + + def test_wrap_setitem(self): + prox = saranwrap.wrap([0,1,2]) + prox[1] = 2 + self.assertEqual(prox[1], 2) + + def test_raising_exceptions(self): + prox = saranwrap.wrap(uuid) + def nofunc(): + prox.never_name_a_function_like_this() + self.assertRaises(AttributeError, nofunc) + + def test_raising_weird_exceptions(self): + # the recursion is killing me! 
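+        # (i.e. the child process ends up running saranwrap to serve the
+        # saranwrap module itself)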
+ prox = saranwrap.wrap(saranwrap) + try: + prox.raise_a_weird_error() + self.assert_(False) + except: + import sys + ex = sys.exc_info()[0] + self.assertEqual(ex, "oh noes you can raise a string") + self.assert_server_exists(prox) + + def test_unpicklable_server_exception(self): + prox = saranwrap.wrap(saranwrap) + def unpickle(): + prox.raise_an_unpicklable_error() + + self.assertRaises(saranwrap.UnrecoverableError, unpickle) + + # It's basically dead + #self.assert_server_exists(prox) + + def test_pickleable_server_exception(self): + prox = saranwrap.wrap(saranwrap) + def fperror(): + prox.raise_standard_error() + + self.assertRaises(FloatingPointError, fperror) + self.assert_server_exists(prox) + + def test_print_does_not_break_wrapper(self): + prox = saranwrap.wrap(saranwrap) + prox.print_string('hello') + self.assert_server_exists(prox) + + def test_stderr_does_not_break_wrapper(self): + prox = saranwrap.wrap(saranwrap) + prox.err_string('goodbye') + self.assert_server_exists(prox) + + def assertLessThan(self, a, b): + self.assert_(a < b, "%s is not less than %s" % (a, b)) + + def test_status(self): + prox = saranwrap.wrap(uuid) + a = prox.uuid4() + status = saranwrap.status(prox) + self.assertEqual(status['object_count'], 1) + self.assertEqual(status['next_id'], 2) + self.assert_(status['pid']) # can't guess what it will be + # status of an object should be the same as the module + self.assertEqual(saranwrap.status(a), status) + # create a new one then immediately delete it + prox.uuid4() + is_id = prox.getnode() # sync up deletes + status = saranwrap.status(prox) + self.assertEqual(status['object_count'], 1) + self.assertEqual(status['next_id'], 3) + prox2 = saranwrap.wrap(uuid) + self.assert_(status['pid'] != saranwrap.status(prox2)['pid']) + + def test_del(self): + prox = saranwrap.wrap(uuid) + delme = prox.uuid4() + status_before = saranwrap.status(prox) + #print status_before['objects'] + del delme + # need to do an access that doesn't create an object + # in order to sync up the deleted objects + prox.getnode() + status_after = saranwrap.status(prox) + #print status_after['objects'] + self.assertLessThan(status_after['object_count'], status_before['object_count']) + + def test_variable_and_keyword_arguments_with_function_calls(self): + import optparse + prox = saranwrap.wrap(optparse) + parser = prox.OptionParser() + z = parser.add_option('-n', action='store', type='string', dest='n') + opts,args = parser.parse_args(["-nfoo"]) + self.assertEqual(opts.n, 'foo') + + def test_original_proxy_going_out_of_scope(self): + def make_uuid(): + prox = saranwrap.wrap(uuid) + # after this function returns, prox should fall out of scope + return prox.uuid4() + tid = make_uuid() + self.assertEqual(tid.get_version(), uuid.uuid4().get_version()) + def make_list(): + import eventlet.test_saranwrap + prox = saranwrap.wrap(eventlet.test_saranwrap.list_maker) + # after this function returns, prox should fall out of scope + return prox() + proxl = make_list() + self.assertEqual(proxl[2], 2) + + def test_status_of_none(self): + try: + saranwrap.status(None) + self.assert_(False) + except AttributeError, e: + pass + + def test_not_inheriting_pythonpath(self): + # construct a fake module in the temp directory + temp_dir = tempfile.mkdtemp("saranwrap_test") + fp = open(os.path.join(temp_dir, "jitar_hero.py"), "w") + fp.write("""import os, sys +pypath = os.environ['PYTHONPATH'] +sys_path = sys.path""") + fp.close() + + # this should fail because we haven't stuck the temp_dir in our path yet + prox = 
saranwrap.wrap_module('jitar_hero') + import cPickle + try: + prox.pypath + self.fail() + except cPickle.UnpicklingError: + pass + + # now try to saranwrap it + sys.path.append(temp_dir) + try: + import jitar_hero + prox = saranwrap.wrap(jitar_hero) + self.assert_(prox.pypath.count(temp_dir)) + self.assert_(prox.sys_path.count(temp_dir)) + finally: + import shutil + shutil.rmtree(temp_dir) + sys.path.remove(temp_dir) + + + def test_detection_of_server_crash(self): + # make the server crash here + pass + + def test_equality_with_local_object(self): + # we'll implement this if there's a use case for it + pass + + def test_non_blocking(self): + # here we test whether it's nonblocking + pass + +if __name__ == '__main__': + unittest.main() From f724c671cdc7585a8e49b403a02fc2e0ce10f9f5 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Thu, 6 Dec 2007 22:52:41 -0500 Subject: [PATCH 38/79] [svn r60] A wrapper for the pooled connections so that clients can call close() with impunity. Also it resolves the annoying bool() limitation of saranwrapped objects. --- eventlet/db_pool.py | 93 +++++++++++++++++++++++++++++++++++++--- eventlet/db_pool_test.py | 30 ++++++++++--- 2 files changed, 113 insertions(+), 10 deletions(-) diff --git a/eventlet/db_pool.py b/eventlet/db_pool.py index 6b37e1c..b68c03c 100644 --- a/eventlet/db_pool.py +++ b/eventlet/db_pool.py @@ -74,21 +74,104 @@ class ConnectionPool(Pool): self._module = module self._args = args self._kwargs = kwargs - Pool.__init__(self, min_size, max_size) + super(ConnectionPool, self).__init__(min_size, max_size) def create(self): return saranwrap.wrap(self._module).connect(*self._args, **self._kwargs) def put(self, conn): - # rollback any uncommitted changes, so that the next process + # rollback any uncommitted changes, so that the next client # has a clean slate. 
This also pokes the process to see if # it's dead or None try: conn.rollback() except (AttributeError, DeadProcess), e: - conn = self.create() - # TODO figure out if we're still connected to the database + conn = None + + # unwrap the connection for storage + if isinstance(conn, GenericConnectionWrapper): + if conn: + conn = conn._base + else: + conn = None + + # *TODO figure out if we're still connected to the database if conn is not None: - Pool.put(self, conn) + super(ConnectionPool, self).put(conn) else: self.current_size -= 1 + + def get(self): + # wrap the connection for easier use + conn = super(ConnectionPool, self).get() + return PooledConnectionWrapper(conn, self) + + +class GenericConnectionWrapper(object): + def __init__(self, baseconn): + self._base = baseconn + def __enter__(self): return self._base.__enter__() + def __exit__(self, exc, value, tb): return self._base.__exit__(exc, value, tb) + def __repr__(self): return self._base.__repr__() + def affected_rows(self): return self._base.affected_rows() + def autocommit(self,*args, **kwargs): return self._base.autocommit(*args, **kwargs) + def begin(self): return self._base.begin() + def change_user(self,*args, **kwargs): return self._base.change_user(*args, **kwargs) + def character_set_name(self,*args, **kwargs): return self._base.character_set_name(*args, **kwargs) + def close(self,*args, **kwargs): return self._base.close(*args, **kwargs) + def commit(self,*args, **kwargs): return self._base.commit(*args, **kwargs) + def cursor(self, cursorclass=None): return self._base.cursor(cursorclass) + def dump_debug_info(self,*args, **kwargs): return self._base.dump_debug_info(*args, **kwargs) + def errno(self,*args, **kwargs): return self._base.errno(*args, **kwargs) + def error(self,*args, **kwargs): return self._base.error(*args, **kwargs) + def errorhandler(self, conn, curs, errcls, errval): return self._base.errorhandler(conn, curs, errcls, errval) + def literal(self, o): return self._base.literal(o) + def set_character_set(self, charset): return self._base.set_character_set(charset) + def set_sql_mode(self, sql_mode): return self._base.set_sql_mode(sql_mode) + def show_warnings(self): return self._base.show_warnings() + def warning_count(self): return self._base.warning_count() + def literal(self, o): return self._base.literal(o) + def ping(self,*args, **kwargs): return self._base.ping(*args, **kwargs) + def query(self,*args, **kwargs): return self._base.query(*args, **kwargs) + def rollback(self,*args, **kwargs): return self._base.rollback(*args, **kwargs) + def select_db(self,*args, **kwargs): return self._base.select_db(*args, **kwargs) + def set_server_option(self,*args, **kwargs): return self._base.set_server_option(*args, **kwargs) + def set_character_set(self, charset): return self._base.set_character_set(charset) + def set_sql_mode(self, sql_mode): return self._base.set_sql_mode(sql_mode) + def server_capabilities(self,*args, **kwargs): return self._base.server_capabilities(*args, **kwargs) + def show_warnings(self): return self._base.show_warnings() + def shutdown(self,*args, **kwargs): return self._base.shutdown(*args, **kwargs) + def sqlstate(self,*args, **kwargs): return self._base.sqlstate(*args, **kwargs) + def stat(self,*args, **kwargs): return self._base.stat(*args, **kwargs) + def store_result(self,*args, **kwargs): return self._base.store_result(*args, **kwargs) + def string_literal(self,*args, **kwargs): return self._base.string_literal(*args, **kwargs) + def thread_id(self,*args, **kwargs): return 
self._base.thread_id(*args, **kwargs) + def use_result(self,*args, **kwargs): return self._base.use_result(*args, **kwargs) + def warning_count(self): return self._base.warning_count() + + +class PooledConnectionWrapper(GenericConnectionWrapper): + """ A connection wrapper where: + - the close method returns the connection to the pool instead of closing it directly + - you can do if conn: (yay) + - returns itself to the pool if it gets garbage collected + """ + def __init__(self, baseconn, pool): + super(PooledConnectionWrapper, self).__init__(baseconn) + self._pool = pool + + def __nonzero__(self): + return hasattr(self, '_base') + + def close(self): + """ Return the connection to the pool, and remove the + reference to it so that you can't use it again through this + wrapper object. + """ + if self: + self._pool.put(self._base) + del self._base + + def __del__(self): + self.close() + diff --git a/eventlet/db_pool_test.py b/eventlet/db_pool_test.py index 33dea27..628beaa 100644 --- a/eventlet/db_pool_test.py +++ b/eventlet/db_pool_test.py @@ -121,6 +121,27 @@ class TestDBConnectionPool(DBTester): self.assert_(conn2.cursor) del conn2 + def test_close_does_a_put(self): + self.assert_(self.pool.free() == 0) + self.connection.close() + self.assert_(self.pool.free() == 1) + self.assertRaises(AttributeError, self.connection.cursor) + + def test_deletion_does_a_put(self): + self.assert_(self.pool.free() == 0) + self.connection = None + self.assert_(self.pool.free() == 1) + + def test_put_doesnt_double_wrap(self): + self.pool.put(self.connection) + conn = self.pool.get() + self.assert_(not isinstance(conn._base, db_pool.PooledConnectionWrapper)) + + def test_bool(self): + self.assert_(self.connection) + self.connection.close() + self.assert_(not self.connection) + def fill_test_table(self, conn): curs = conn.cursor() for i in range(1000): @@ -232,10 +253,10 @@ class TestDBConnectionPool(DBTester): results.sort() self.assertEqual([1, 2], results) -import MySQLdb - class TestMysqlConnectionPool(TestDBConnectionPool, unittest.TestCase): def setUp(self): + import MySQLdb + self._dbmodule = MySQLdb try: import simplejson import os.path @@ -243,7 +264,6 @@ class TestMysqlConnectionPool(TestDBConnectionPool, unittest.TestCase): # have to convert unicode objects to str objects because mysqldb is dum self._auth = dict([(str(k), str(v)) for k, v in auth_utf8.items()]) - self._dbmodule = MySQLdb except (IOError, ImportError), e: self._auth = {'host': 'localhost','user': 'root','passwd': '','db': 'persist0'} super(TestMysqlConnectionPool, self).setUp() @@ -255,13 +275,13 @@ class TestMysqlConnectionPool(TestDBConnectionPool, unittest.TestCase): except Exception: pass dbname = auth.pop('db') - db = MySQLdb.connect(**auth).cursor() + db = self._dbmodule.connect(**auth).cursor() db.execute("create database "+dbname) db.close() del db def drop_db(self): - db = MySQLdb.connect(**self._auth).cursor() + db = self._dbmodule.connect(**self._auth).cursor() db.execute("drop database "+self._auth['db']) db.close() del db From ef76a61204e13bdad7940bbdaeb88351cfb2c466 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Fri, 7 Dec 2007 02:22:00 -0500 Subject: [PATCH 39/79] [svn r61] Finally solved the bool(saranwrapped) problem. Moved the tests to use the time module since uuid.getnode() was hanging forever on this machine. Cleared out the pool pointer on close in db_pool. 
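(For context, a small illustrative sketch, not part of this change and with
made-up names, of why truth testing needs its own request type: bool() looks
special methods such as __nonzero__ up on the proxy's type and bypasses
__getattribute__, so a proxy that only hooks attribute access never gets a
chance to forward the call.)

    class ForwardingProxy(object):
        def __init__(self, ask_server):
            # ask_server: any callable that performs the remote round-trip
            self._ask = ask_server

        def __getattribute__(self, name):
            if name.startswith('_'):
                return object.__getattribute__(self, name)
            return self._ask('getattr', name)

        def __nonzero__(self):
            # Python 2 hook used by bool() and "if obj:"; it has to be
            # forwarded explicitly because implicit special-method lookup
            # skips __getattribute__.
            return bool(self._ask('nonzero', None))

    fake_server = lambda action, arg: {'getattr': 'a value', 'nonzero': True}[action]
    assert bool(ForwardingProxy(fake_server))

(The change below does the equivalent with a dedicated 'nonzero' message
handled by Server.handle_nonzero.)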
--- eventlet/db_pool.py | 3 ++- eventlet/saranwrap.py | 17 +++++++++++++++-- eventlet/saranwrap_test.py | 27 ++++++++++++++++++--------- 3 files changed, 35 insertions(+), 12 deletions(-) diff --git a/eventlet/db_pool.py b/eventlet/db_pool.py index b68c03c..33520d8 100644 --- a/eventlet/db_pool.py +++ b/eventlet/db_pool.py @@ -168,8 +168,9 @@ class PooledConnectionWrapper(GenericConnectionWrapper): reference to it so that you can't use it again through this wrapper object. """ - if self: + if self and self._pool: self._pool.put(self._base) + self._pool = None del self._base def __del__(self): diff --git a/eventlet/saranwrap.py b/eventlet/saranwrap.py index b899d8a..c65bb1d 100644 --- a/eventlet/saranwrap.py +++ b/eventlet/saranwrap.py @@ -375,9 +375,19 @@ not need to deal with this class directly.""" # tack anything on to the return value here because str values are used as data. return self.__str__() + def __nonzero__(self): + # bool(obj) is another method that skips __getattribute__. There's no good way to just pass + # the method on, so we use a special message. + my_in = self.__local_dict['_in'] + my_out = self.__local_dict['_out'] + my_id = self.__local_dict['_id'] + _dead_list = self.__local_dict['_dead_list'] + request = Request('nonzero', {'id':my_id}) + _write_request(request, my_out) + return _read_response(my_id, None, my_in, my_out, _dead_list) + def __len__(self): - # see description for __repr__, len(obj) is the same. Unfortunately, __len__ is also - # used when determining whether an object is boolean or not, e.g. if proxied_object: + # see description for __repr__, len(obj) is the same. return self.__len__() def proxied_type(self): @@ -498,6 +508,9 @@ when the id is None.""" def handle_type(self, object, req): return type(object) + def handle_nonzero(self, object, req): + return bool(object) + def loop(self): """@brief Loop forever and respond to all requests.""" _log("Server::loop") diff --git a/eventlet/saranwrap_test.py b/eventlet/saranwrap_test.py index 1ebd536..a8603e8 100644 --- a/eventlet/saranwrap_test.py +++ b/eventlet/saranwrap_test.py @@ -27,6 +27,7 @@ from eventlet import saranwrap import os import sys import tempfile +import time import unittest import uuid @@ -78,6 +79,7 @@ class TestSaranwrap(unittest.TestCase): self.assertEqual(saranwrap.Proxy, type(prox)) id = prox.uuid4() self.assertEqual(id.get_version(), uuid.uuid4().get_version()) + self.assert_(repr(prox.uuid4)) def test_wrap_eq(self): prox = saranwrap.wrap(uuid) @@ -87,6 +89,13 @@ class TestSaranwrap(unittest.TestCase): id3 = prox.uuid4() self.assert_(id1 != id3) + def test_wrap_nonzero(self): + prox = saranwrap.wrap(uuid) + id1 = prox.uuid4() + self.assert_(bool(id1)) + prox2 = saranwrap.wrap([1, 2, 3]) + self.assert_(bool(prox2)) + def test_multiple_wraps(self): prox1 = saranwrap.wrap(uuid) prox2 = saranwrap.wrap(uuid) @@ -165,8 +174,8 @@ class TestSaranwrap(unittest.TestCase): self.assert_(a < b, "%s is not less than %s" % (a, b)) def test_status(self): - prox = saranwrap.wrap(uuid) - a = prox.uuid4() + prox = saranwrap.wrap(time) + a = prox.gmtime(0) status = saranwrap.status(prox) self.assertEqual(status['object_count'], 1) self.assertEqual(status['next_id'], 2) @@ -174,8 +183,8 @@ class TestSaranwrap(unittest.TestCase): # status of an object should be the same as the module self.assertEqual(saranwrap.status(a), status) # create a new one then immediately delete it - prox.uuid4() - is_id = prox.getnode() # sync up deletes + prox.gmtime(1) + is_id = prox.ctime(1) # sync up deletes status = 
saranwrap.status(prox) self.assertEqual(status['object_count'], 1) self.assertEqual(status['next_id'], 3) @@ -183,14 +192,14 @@ class TestSaranwrap(unittest.TestCase): self.assert_(status['pid'] != saranwrap.status(prox2)['pid']) def test_del(self): - prox = saranwrap.wrap(uuid) - delme = prox.uuid4() + prox = saranwrap.wrap(time) + delme = prox.gmtime(0) status_before = saranwrap.status(prox) #print status_before['objects'] del delme # need to do an access that doesn't create an object # in order to sync up the deleted objects - prox.getnode() + prox.ctime(1) status_after = saranwrap.status(prox) #print status_after['objects'] self.assertLessThan(status_after['object_count'], status_before['object_count']) @@ -211,8 +220,8 @@ class TestSaranwrap(unittest.TestCase): tid = make_uuid() self.assertEqual(tid.get_version(), uuid.uuid4().get_version()) def make_list(): - import eventlet.test_saranwrap - prox = saranwrap.wrap(eventlet.test_saranwrap.list_maker) + from eventlet import saranwrap_test + prox = saranwrap.wrap(saranwrap_test.list_maker) # after this function returns, prox should fall out of scope return prox() proxl = make_list() From 6eba379ea985a7c67fd670705ab71f7e0de0b3b6 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 10 Dec 2007 21:47:25 -0500 Subject: [PATCH 40/79] [svn r62] Cleaned up the two different paths to closing a PooledConnectionWrapper, so that they are the same: the put method of ConnectionPool. --- eventlet/db_pool.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/eventlet/db_pool.py b/eventlet/db_pool.py index 33520d8..9a6c955 100644 --- a/eventlet/db_pool.py +++ b/eventlet/db_pool.py @@ -85,17 +85,21 @@ class ConnectionPool(Pool): # it's dead or None try: conn.rollback() - except (AttributeError, DeadProcess), e: + except: + # we don't care what the exception was, we just know the + # connection is dead + print "WARNING: connection.rollback raised: %s" % (sys.exc_info()[1]) conn = None # unwrap the connection for storage if isinstance(conn, GenericConnectionWrapper): if conn: - conn = conn._base + base = conn._base + conn._destroy() + conn = base else: conn = None - # *TODO figure out if we're still connected to the database if conn is not None: super(ConnectionPool, self).put(conn) else: @@ -153,7 +157,7 @@ class GenericConnectionWrapper(object): class PooledConnectionWrapper(GenericConnectionWrapper): """ A connection wrapper where: - the close method returns the connection to the pool instead of closing it directly - - you can do if conn: (yay) + - you can do if conn: - returns itself to the pool if it gets garbage collected """ def __init__(self, baseconn, pool): @@ -161,17 +165,19 @@ class PooledConnectionWrapper(GenericConnectionWrapper): self._pool = pool def __nonzero__(self): - return hasattr(self, '_base') + return (hasattr(self, '_base') and bool(self._base)) + def _destroy(self): + self._pool = None + del self._base + def close(self): """ Return the connection to the pool, and remove the reference to it so that you can't use it again through this wrapper object. """ if self and self._pool: - self._pool.put(self._base) - self._pool = None - del self._base + self._pool.put(self) def __del__(self): self.close() From f53f7e22486198bea4a8bb0c99748c6f9863647c Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 10 Dec 2007 22:39:15 -0500 Subject: [PATCH 41/79] [svn r63] See what happens when you commit without testing? This is improved. 
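(A usage sketch of the contract this change is converging on; it assumes the
MySQLdb module, a reachable local server, and a database named test, none of
which are part of this patch: close() always routes the connection back
through ConnectionPool.put(), the emptied wrapper then tests false, and a
second close() is a harmless no-op.)

    import MySQLdb
    from eventlet import db_pool

    pool = db_pool.ConnectionPool(MySQLdb, 0, 2,
                                  host='localhost', user='root', passwd='', db='test')
    conn = pool.get()            # a PooledConnectionWrapper around the (saranwrapped) connection
    curs = conn.cursor()
    curs.execute('select 1')

    conn.close()                 # returns the underlying connection to the pool
    assert not conn              # the wrapper is now empty and tests false
    conn.close()                 # calling close again does nothing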
--- eventlet/db_pool.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/eventlet/db_pool.py b/eventlet/db_pool.py index 9a6c955..f8b437c 100644 --- a/eventlet/db_pool.py +++ b/eventlet/db_pool.py @@ -23,7 +23,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -import os +import os, sys from eventlet.pools import Pool from eventlet.processes import DeadProcess @@ -85,6 +85,9 @@ class ConnectionPool(Pool): # it's dead or None try: conn.rollback() + except AttributeError, e: + # this means it's already been destroyed + conn = None except: # we don't care what the exception was, we just know the # connection is dead @@ -169,7 +172,10 @@ class PooledConnectionWrapper(GenericConnectionWrapper): def _destroy(self): self._pool = None - del self._base + try: + del self._base + except AttributeError: + pass def close(self): """ Return the connection to the pool, and remove the @@ -178,6 +184,7 @@ class PooledConnectionWrapper(GenericConnectionWrapper): """ if self and self._pool: self._pool.put(self) + self._destroy() def __del__(self): self.close() From 3526e674947b5c60168b582965938205f01342b8 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Tue, 11 Dec 2007 02:09:39 -0500 Subject: [PATCH 42/79] [svn r64] Simple repro for the weird bug we were seeing. No fix implemented. --- eventlet/saranwrap_test.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/eventlet/saranwrap_test.py b/eventlet/saranwrap_test.py index a8603e8..2a43dae 100644 --- a/eventlet/saranwrap_test.py +++ b/eventlet/saranwrap_test.py @@ -22,7 +22,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. -from eventlet import saranwrap +from eventlet import saranwrap, coros import os import sys @@ -31,9 +31,14 @@ import time import unittest import uuid +# random test stuff def list_maker(): return [0,1,2] +one = 1 +two = 2 +three = 3 + class TestSaranwrap(unittest.TestCase): def assert_server_exists(self, prox): self.assert_(saranwrap.status(prox)) @@ -264,6 +269,18 @@ sys_path = sys.path""") shutil.rmtree(temp_dir) sys.path.remove(temp_dir) + + def test_contention(self): + from eventlet import saranwrap_test + prox = saranwrap.wrap(saranwrap_test) + + pool = coros.CoroutinePool(max_size=4) + waiters = [] + waiters.append(pool.execute(lambda: self.assertEquals(prox.one, 1))) + waiters.append(pool.execute(lambda: self.assertEquals(prox.two, 2))) + waiters.append(pool.execute(lambda: self.assertEquals(prox.three, 3))) + for waiter in waiters: + waiter.wait() def test_detection_of_server_crash(self): # make the server crash here From c0b6da6383805e6d501562aee3b58e8adb42843c Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Tue, 11 Dec 2007 22:02:25 -0500 Subject: [PATCH 43/79] [svn r65] Refactored all the _in, _out, _dead_list stuff into one object, the ChildProcess. Haven't fixed the test_contention test, this is just a refactoring only. 
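(For orientation while reading the refactoring: ChildProcess.make_request just
writes a pickled Request and reads back the framed reply; both directions use
the 4-byte length-prefixed pickle framing of _write_lp_hunk/_read_lp_hunk.
Below is a self-contained sketch of that framing only, with a StringIO
standing in for the child's pipe and a plain tuple standing in for the Request
object.)

    import struct, cPickle
    from StringIO import StringIO

    def write_lp_hunk(stream, hunk):
        # 4-byte length prefix (struct 'I'), then the pickled payload
        stream.write(struct.pack('I', len(hunk)) + hunk)

    def read_lp_hunk(stream):
        length = struct.unpack('I', stream.read(4))[0]
        return stream.read(length)

    pipe = StringIO()
    write_lp_hunk(pipe, cPickle.dumps(('getattr', {'id': None, 'attribute': 'sleep'})))
    pipe.seek(0)
    print cPickle.loads(read_lp_hunk(pipe))   # -> ('getattr', {'id': None, 'attribute': 'sleep'})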
--- eventlet/saranwrap.py | 155 ++++++++++++++++++++---------------------- 1 file changed, 74 insertions(+), 81 deletions(-) diff --git a/eventlet/saranwrap.py b/eventlet/saranwrap.py index c65bb1d..fa1c6b9 100644 --- a/eventlet/saranwrap.py +++ b/eventlet/saranwrap.py @@ -100,9 +100,7 @@ except NameError: from eventlet.processes import Process from eventlet import api -# # debugging hooks -# _g_debug_mode = False if _g_debug_mode: import traceback @@ -124,7 +122,7 @@ def wrap(obj, dead_callback = None): return wrap_module(obj.__name__, dead_callback) pythonpath_sync() p = Process('python', [__file__, '--child'], dead_callback) - prox = Proxy(p, p) + prox = Proxy(ChildProcess(p, p)) prox.obj = obj return prox.obj @@ -139,15 +137,14 @@ def wrap_module(fqname, dead_callback = None): p = Process('python', [__file__, '--module', fqname, '--logfile', '/tmp/saranwrap.log'], dead_callback) else: p = Process('python', [__file__, '--module', fqname,], dead_callback) - prox = Proxy(p, p) + prox = Proxy(ChildProcess(p,p)) return prox def status(proxy): """ @brief get the status from the server through a proxy @param proxy a saranwrap.Proxy object connected to a server.""" - _write_request(Request('status', {}), proxy.__local_dict['_out']) - return _read_response(None, None, proxy.__local_dict['_in'], proxy.__local_dict['_out'], None) + return proxy.__local_dict['_cp'].make_request(Request('status', {})) class BadResponse(Exception): """"This exception is raised by an saranwrap client when it could @@ -171,6 +168,11 @@ class Request(object): return "Request `"+self._action+"` "+str(self._param) def __getitem__(self, name): return self._param[name] + def get(self, name, default = None): + try: + return self[name] + except KeyError: + return default def action(self): return self._action @@ -180,7 +182,7 @@ def _read_lp_hunk(stream): body = stream.read(length) return body -def _read_response(id, attribute, input, output, dead_list): +def _read_response(id, attribute, input, cp): """@brief local helper method to read respones from the rpc server.""" try: str = _read_lp_hunk(input) @@ -192,9 +194,9 @@ def _read_response(id, attribute, input, output, dead_list): if response[0] == 'value': return response[1] elif response[0] == 'callable': - return CallableProxy(id, attribute, input, output, dead_list) + return CallableProxy(id, attribute, cp) elif response[0] == 'object': - return ObjectProxy(input, output, response[1], dead_list) + return ObjectProxy(cp, response[1]) elif response[0] == 'exception': exp = response[1] raise exp @@ -236,40 +238,56 @@ def _log(message): def _unmunge_attr_name(name): """ Sometimes attribute names come in with classname prepended, not sure why. This function removes said classname, because we're huge hackers and we didn't - find out what the true right thing to do is. *FIX: find out. """ + find out what the true right thing to do is. *TODO: find out. """ if(name.startswith('_Proxy')): name = name[len('_Proxy'):] if(name.startswith('_ObjectProxy')): name = name[len('_ObjectProxy'):] return name +class ChildProcess(object): + """\ + This class wraps a remote python process, presumably available + in an instance of an Server. + """ + def __init__(self, instr, outstr, dead_list = None): + """ + @param instr a file-like object which supports read(). + @param outstr a file-like object which supports write() and flush(). 
+ @param dead_list a list of ids of remote objects that are dead + """ + # default dead_list inside the function because all objects in method + # argument lists are init-ed only once globally + _prnt("ChildProcess::__init__") + if dead_list is None: + dead_list = set() + self._dead_list = dead_list + self._in = instr + self._out = outstr + + def make_request(self, request): + _id = request.get('id') + _attribute = request.get('attribute') or request.get('key') or request.get('name') + _write_request(request, self._out) + return _read_response(_id, _attribute, self._in, self) + class Proxy(object): """\ @class Proxy -@brief This class wraps a remote python process, presumably available -in an instance of an Server. +@brief This is the class you will typically use as a client to a child +process. -This is the class you will typically use as a client to a child -process. Simply instantiate one around a file-like interface and start +Simply instantiate one around a file-like interface and start calling methods on the thing that is exported. The dir() builtin is not supported, so you have to know what has been exported. """ - def __init__(self, input, output, dead_list = None): - """\ -@param input a file-like object which supports read(). -@param output a file-like object which supports write() and flush(). -@param id an identifier for the remote object. humans do not provide this. -""" - # default dead_list inside the function because all objects in method - # argument lists are init-ed only once globally - if dead_list is None: - dead_list = set() + def __init__(self, cp): + """@param A ChildProcess instance that wraps the i/o to the child process. + """ #_prnt("Proxy::__init__") self.__local_dict = dict( - _in = input, - _out = output, - _dead_list = dead_list, + _cp = cp, _id = None) def __getattribute__(self, attribute): @@ -279,22 +297,20 @@ not supported, so you have to know what has been exported. attribute = _unmunge_attr_name(attribute) return super(Proxy, self).__getattribute__(attribute) else: - my_in = self.__local_dict['_in'] - my_out = self.__local_dict['_out'] + my_cp = self.__local_dict['_cp'] my_id = self.__local_dict['_id'] - _dead_list = self.__local_dict['_dead_list'] + _dead_list = my_cp._dead_list for dead_object in _dead_list.copy(): request = Request('del', {'id':dead_object}) - _write_request(request, my_out) - response = _read_response(my_id, attribute, my_in, my_out, _dead_list) + + my_cp.make_request(request) _dead_list.remove(dead_object) # Pass all public attributes across to find out if it is # callable or a simple attribute. request = Request('getattr', {'id':my_id, 'attribute':attribute}) - _write_request(request, my_out) - return _read_response(my_id, attribute, my_in, my_out, _dead_list) + return my_cp.make_request(request) def __setattr__(self, attribute, value): #_prnt("Proxy::__setattr__: %s" % attribute) @@ -304,14 +320,11 @@ not supported, so you have to know what has been exported. 
attribute = _unmunge_attr_name(attribute) super(Proxy, self).__getattribute__('__dict__')[attribute]=value else: - my_in = self.__local_dict['_in'] - my_out = self.__local_dict['_out'] + my_cp = self.__local_dict['_cp'] my_id = self.__local_dict['_id'] - _dead_list = self.__local_dict['_dead_list'] # Pass the set attribute across request = Request('setattr', {'id':my_id, 'attribute':attribute, 'value':value}) - _write_request(request, my_out) - return _read_response(my_id, attribute, my_in, my_out, _dead_list) + return my_cp.make_request(request) class ObjectProxy(Proxy): """\ @@ -321,47 +334,37 @@ class ObjectProxy(Proxy): This class will be created during normal operation, and users should not need to deal with this class directly.""" - def __init__(self, input, output, id, dead_list): + def __init__(self, cp, _id): """\ -@param input a file-like object which supports read(). -@param output a file-like object which supports write() and flush(). -@param id an identifier for the remote object. humans do not provide this. +@param cp A ChildProcess object that wraps the i/o of a child process. +@param _id an identifier for the remote object. humans do not provide this. """ - Proxy.__init__(self, input, output, dead_list) - self.__local_dict['_id'] = id - #_prnt("ObjectProxy::__init__ %s" % self._id) + Proxy.__init__(self, cp) + self.__local_dict['_id'] = _id + #_prnt("ObjectProxy::__init__ %s" % _id) def __del__(self): my_id = self.__local_dict['_id'] - _prnt("ObjectProxy::__del__ %s" % my_id) - self.__local_dict['_dead_list'].add(my_id) + #_prnt("ObjectProxy::__del__ %s" % my_id) + self.__local_dict['_cp']._dead_list.add(my_id) def __getitem__(self, key): - my_in = self.__local_dict['_in'] - my_out = self.__local_dict['_out'] + my_cp = self.__local_dict['_cp'] my_id = self.__local_dict['_id'] - _dead_list = self.__local_dict['_dead_list'] request = Request('getitem', {'id':my_id, 'key':key}) - _write_request(request, my_out) - return _read_response(my_id, key, my_in, my_out, _dead_list) + return my_cp.make_request(request) def __setitem__(self, key, value): - my_in = self.__local_dict['_in'] - my_out = self.__local_dict['_out'] + my_cp = self.__local_dict['_cp'] my_id = self.__local_dict['_id'] - _dead_list = self.__local_dict['_dead_list'] request = Request('setitem', {'id':my_id, 'key':key, 'value':value}) - _write_request(request, my_out) - return _read_response(my_id, key, my_in, my_out, _dead_list) + return my_cp.make_request(request) def __eq__(self, rhs): - my_in = self.__local_dict['_in'] - my_out = self.__local_dict['_out'] + my_cp = self.__local_dict['_cp'] my_id = self.__local_dict['_id'] - _dead_list = self.__local_dict['_dead_list'] request = Request('eq', {'id':my_id, 'rhs':rhs.__local_dict['_id']}) - _write_request(request, my_out) - return _read_response(my_id, None, my_in, my_out, _dead_list) + return my_cp.make_request(request) def __repr__(self): # apparently repr(obj) skips the whole getattribute thing and just calls __repr__ @@ -378,13 +381,10 @@ not need to deal with this class directly.""" def __nonzero__(self): # bool(obj) is another method that skips __getattribute__. There's no good way to just pass # the method on, so we use a special message. 
- my_in = self.__local_dict['_in'] - my_out = self.__local_dict['_out'] + my_cp = self.__local_dict['_cp'] my_id = self.__local_dict['_id'] - _dead_list = self.__local_dict['_dead_list'] request = Request('nonzero', {'id':my_id}) - _write_request(request, my_out) - return _read_response(my_id, None, my_in, my_out, _dead_list) + return my_cp.make_request(request) def __len__(self): # see description for __repr__, len(obj) is the same. @@ -393,15 +393,11 @@ not need to deal with this class directly.""" def proxied_type(self): if type(self) is not ObjectProxy: return type(self) - - my_in = self.__local_dict['_in'] - my_out = self.__local_dict['_out'] + + my_cp = self.__local_dict['_cp'] my_id = self.__local_dict['_id'] request = Request('type', {'id':my_id}) - _write_request(request, my_out) - # dead list can be none because we know the result will always be - # a value and not an ObjectProxy itself - return _read_response(my_id, None, my_in, my_out, None) + return my_cp.make_request(request) class CallableProxy(object): """\ @@ -411,13 +407,11 @@ class CallableProxy(object): This class will be created by an Proxy during normal operation, and users should not need to deal with this class directly.""" - def __init__(self, object_id, name, input, output, dead_list): + def __init__(self, object_id, name, cp): #_prnt("CallableProxy::__init__: %s, %s" % (object_id, name)) self._object_id = object_id self._name = name - self._in = input - self._out = output - self._dead_list = dead_list + self._cp = cp def __call__(self, *args, **kwargs): #_prnt("CallableProxy::__call__: %s, %s" % (args, kwargs)) @@ -427,8 +421,7 @@ and users should not need to deal with this class directly.""" # can safely pass this one to the remote object. #_prnt("calling %s %s" % (self._object_id, self._name) request = Request('call', {'id':self._object_id, 'name':self._name, 'args':args, 'kwargs':kwargs}) - _write_request(request, self._out) - return _read_response(self._object_id, self._name, self._in, self._out, self._dead_list) + return self._cp.make_request(request) class Server(object): def __init__(self, input, output, export): From 5753596ab87e9cfdc6a5b06e4df8f61ce580c1b4 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Tue, 11 Dec 2007 22:13:03 -0500 Subject: [PATCH 44/79] [svn r66] Put a simple lock around the write/read pair so that only one request per child process will be on the wire at any time. Thanks, Donovan, for making it so easy! Test_contention passes now. 
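The sketch below (standalone, not the eventlet API itself) shows why the lock matters: each request is a write followed by its matching read, so if two coroutines interleaved their writes the responses would be mismatched. Guarding the whole write/read pair with a one-token pool keeps exactly one exchange on the wire at a time.

    import collections

    class OneTokenPool(object):
        """Toy stand-in for a pool with max_size=1, used purely as a mutex."""
        def __init__(self):
            self._tokens = collections.deque(['token'])

        def get(self):
            # the real pool blocks the calling coroutine until a token is free;
            # this sketch just fails loudly if the invariant is violated
            if not self._tokens:
                raise RuntimeError('another request is already on the wire')
            return self._tokens.popleft()

        def put(self, token):
            self._tokens.append(token)

    def make_request(lock, outstr, instr, payload):
        t = lock.get()
        try:
            outstr.write(payload)      # send the request...
            return instr.readline()    # ...and read its response before anyone else writes
        finally:
            lock.put(t)                # always give the token back, even on error
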
--- eventlet/saranwrap.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/eventlet/saranwrap.py b/eventlet/saranwrap.py index fa1c6b9..27cb039 100644 --- a/eventlet/saranwrap.py +++ b/eventlet/saranwrap.py @@ -98,7 +98,7 @@ except NameError: from sets import Set as set, ImmutableSet as frozenset from eventlet.processes import Process -from eventlet import api +from eventlet import api, pools # debugging hooks _g_debug_mode = False @@ -264,12 +264,20 @@ class ChildProcess(object): self._dead_list = dead_list self._in = instr self._out = outstr + self._lock = pools.TokenPool(max_size=1) def make_request(self, request): _id = request.get('id') _attribute = request.get('attribute') or request.get('key') or request.get('name') - _write_request(request, self._out) - return _read_response(_id, _attribute, self._in, self) + + t = self._lock.get() + try: + _write_request(request, self._out) + retval = _read_response(_id, _attribute, self._in, self) + finally: + self._lock.put(t) + + return retval class Proxy(object): From 7284646c924fd8d8a0b627cba64ea82a4c15dfb8 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Tue, 11 Dec 2007 23:10:09 -0500 Subject: [PATCH 45/79] [svn r67] Was somewhat unhappy with previous auto-detection of attribute value, refactored so that it's passed in. Not that it matters, I wrote a unit test that should have been exercised by this change but it's completely broken no matter which way you slice it. We'd have to change the CallableProxy to enable the list_of_functions use case. --- eventlet/saranwrap.py | 15 +++++++-------- eventlet/saranwrap_test.py | 7 ++++++- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/eventlet/saranwrap.py b/eventlet/saranwrap.py index 27cb039..3b081a5 100644 --- a/eventlet/saranwrap.py +++ b/eventlet/saranwrap.py @@ -266,14 +266,13 @@ class ChildProcess(object): self._out = outstr self._lock = pools.TokenPool(max_size=1) - def make_request(self, request): + def make_request(self, request, attribute=None): _id = request.get('id') - _attribute = request.get('attribute') or request.get('key') or request.get('name') t = self._lock.get() try: _write_request(request, self._out) - retval = _read_response(_id, _attribute, self._in, self) + retval = _read_response(_id, attribute, self._in, self) finally: self._lock.put(t) @@ -318,7 +317,7 @@ not supported, so you have to know what has been exported. # Pass all public attributes across to find out if it is # callable or a simple attribute. request = Request('getattr', {'id':my_id, 'attribute':attribute}) - return my_cp.make_request(request) + return my_cp.make_request(request, attribute=attribute) def __setattr__(self, attribute, value): #_prnt("Proxy::__setattr__: %s" % attribute) @@ -332,7 +331,7 @@ not supported, so you have to know what has been exported. 
my_id = self.__local_dict['_id'] # Pass the set attribute across request = Request('setattr', {'id':my_id, 'attribute':attribute, 'value':value}) - return my_cp.make_request(request) + return my_cp.make_request(request, attribute=attribute) class ObjectProxy(Proxy): """\ @@ -360,13 +359,13 @@ not need to deal with this class directly.""" my_cp = self.__local_dict['_cp'] my_id = self.__local_dict['_id'] request = Request('getitem', {'id':my_id, 'key':key}) - return my_cp.make_request(request) + return my_cp.make_request(request, attribute=key) def __setitem__(self, key, value): my_cp = self.__local_dict['_cp'] my_id = self.__local_dict['_id'] request = Request('setitem', {'id':my_id, 'key':key, 'value':value}) - return my_cp.make_request(request) + return my_cp.make_request(request, attribute=key) def __eq__(self, rhs): my_cp = self.__local_dict['_cp'] @@ -429,7 +428,7 @@ and users should not need to deal with this class directly.""" # can safely pass this one to the remote object. #_prnt("calling %s %s" % (self._object_id, self._name) request = Request('call', {'id':self._object_id, 'name':self._name, 'args':args, 'kwargs':kwargs}) - return self._cp.make_request(request) + return self._cp.make_request(request, attribute=self._name) class Server(object): def __init__(self, input, output, export): diff --git a/eventlet/saranwrap_test.py b/eventlet/saranwrap_test.py index 2a43dae..061699c 100644 --- a/eventlet/saranwrap_test.py +++ b/eventlet/saranwrap_test.py @@ -269,7 +269,6 @@ sys_path = sys.path""") shutil.rmtree(temp_dir) sys.path.remove(temp_dir) - def test_contention(self): from eventlet import saranwrap_test prox = saranwrap.wrap(saranwrap_test) @@ -281,6 +280,12 @@ sys_path = sys.path""") waiters.append(pool.execute(lambda: self.assertEquals(prox.three, 3))) for waiter in waiters: waiter.wait() + + def test_list_of_functions(self): + return # this test is known to fail, we can implement it sometime in the future if we wish + from eventlet import saranwrap_test + prox = saranwrap.wrap([saranwrap_test.list_maker]) + self.assertEquals(list_maker(), prox[0]()) def test_detection_of_server_crash(self): # make the server crash here From 468cbd872536ce7868126e400fed976afa923aeb Mon Sep 17 00:00:00 2001 From: "donovan.linden" Date: Thu, 13 Dec 2007 14:39:23 -0500 Subject: [PATCH 46/79] [svn r68] svn merge -r8:58 https://svn.secondlife.com/svn/eventlet/branches/beta-1 into trunk --- eventlet/coros.py | 1 + eventlet/coros_test.py | 12 ++++++++++++ eventlet/httpd.py | 40 +++++++++++++++++++++++++++++++++++----- eventlet/httpd_test.py | 14 ++++++++++++++ eventlet/runloop.py | 37 +++++++++++++++++++++++++++++++------ eventlet/runloop_test.py | 2 +- eventlet/timer.py | 2 +- eventlet/timer_test.py | 2 +- eventlet/wrappedfd.py | 1 + 9 files changed, 97 insertions(+), 14 deletions(-) diff --git a/eventlet/coros.py b/eventlet/coros.py index 9f02312..86b77ac 100644 --- a/eventlet/coros.py +++ b/eventlet/coros.py @@ -118,6 +118,7 @@ class CoroutinePool(pools.Pool): traceback.print_exc() if evt is not None: evt.send(exc=e) + api.get_hub().runloop.cancel_timers(api.getcurrent()) self.put(sender) def create(self): diff --git a/eventlet/coros_test.py b/eventlet/coros_test.py index 0ea65ef..ee15dd0 100644 --- a/eventlet/coros_test.py +++ b/eventlet/coros_test.py @@ -22,6 +22,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" from eventlet import tests +from eventlet import timer from eventlet import coros, api class TestEvent(tests.TestCase): @@ -140,5 +141,16 @@ class TestCoroutinePool(tests.TestCase): done.wait() self.assertEquals(['cons1', 'prod', 'cons2'], results) + def test_timer_cancel(self): + def some_work(): + t = timer.Timer(5, lambda: None) + t.schedule() + return t + pool = coros.CoroutinePool(0, 2) + worker = pool.execute(some_work) + t = worker.wait() + api.sleep(0) + self.assertEquals(t.cancelled, True) + if __name__ == '__main__': tests.main() diff --git a/eventlet/httpd.py b/eventlet/httpd.py index 73b4388..5a78ec4 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -353,6 +353,11 @@ class Request(object): self._cached_parsed_body = body return self._cached_parsed_body + def override_body(self, body): + if not hasattr(self, '_cached_parsed_body'): + self.read_body() ## Read and discard body + self._cached_parsed_body = body + def response_written(self): ## TODO change badly named variable return self._request_started @@ -372,14 +377,16 @@ class Request(object): return "" % ( getattr(self, '_method'), getattr(self, '_path')) - DEFAULT_TIMEOUT = 300 +# This value was chosen because apache 2 has a default limit of 8190. +# I believe that slightly smaller number is because apache does not +# count the \r\n. +MAX_REQUEST_LINE = 8192 class Timeout(RuntimeError): pass - class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): protocol_version = "HTTP/1.1" def __init__(self, request, client_address, server): @@ -403,6 +410,13 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): "%s %d %s" % ( self.protocol_version, self._code, self._message)] + def write_bad_request(self, status, reason): + self.set_response_code(self, status, reason) + self.wfile.write(''.join(self.generate_status_line())) + self.wfile.write('\r\nServer: %s\r\n' % self.version_string()) + self.wfile.write('Date: %s\r\n' % self.date_time_string()) + self.wfile.write('Content-Length: 0\r\n\r\n') + def handle(self): self.close_connection = 0 @@ -412,7 +426,14 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): break cancel = api.exc_after(timeout, Timeout) try: - self.raw_requestline = self.rfile.readline() + self.raw_requestline = self.rfile.readline(MAX_REQUEST_LINE) + if self.raw_requestline is not None: + if len(self.raw_requestline) == MAX_REQUEST_LINE: + # Someone sent a request line which is too + # large. Be helpful and tell them. 
+ self.write_bad_request(414, 'Request-URI Too Long') + self.close_connection = True + continue except socket.error, e: if e[0] in CONNECTION_CLOSED: self.close_connection = True @@ -421,6 +442,17 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): except Timeout: self.close_connection = True continue + except Exception, e: + try: + if e[0][0][0].startswith('SSL'): + print "SSL Error:", e[0][0] + self.close_connection = True + cancel.cancel() + continue + except Exception, f: + print "Exception in ssl test:",f + pass + raise e cancel.cancel() if not self.raw_requestline or not self.parse_request(): @@ -519,5 +551,3 @@ def server(sock, site, log=None, max_size=512): sock.close() except socket.error: pass - - diff --git a/eventlet/httpd_test.py b/eventlet/httpd_test.py index 803e84e..85600fe 100644 --- a/eventlet/httpd_test.py +++ b/eventlet/httpd_test.py @@ -136,6 +136,20 @@ class TestHttpd(tests.TestCase): ['-c','64','-n','1024', '-k', url]) print out.read() + def test_006_reject_long_urls(self): + sock = api.connect_tcp( + ('127.0.0.1', 12346)) + path_parts = [] + for ii in range(3000): + path_parts.append('path') + path = '/'.join(path_parts) + request = 'GET /%s HTTP/1.0\r\nHost: localhost\r\n\r\n' % path + sock.write(request) + result = sock.readline() + status = result.split(' ')[1] + print "status:",status + self.assertEqual(status, '414') + sock.close() if __name__ == '__main__': tests.main() diff --git a/eventlet/runloop.py b/eventlet/runloop.py index 252e3c9..eedd76d 100644 --- a/eventlet/runloop.py +++ b/eventlet/runloop.py @@ -32,6 +32,8 @@ import bisect import sys import traceback +import greenlet + from eventlet.timer import Timer @@ -48,6 +50,7 @@ class RunLoop(object): self.stopping = False self.running = False self.timers = [] + self.timers_by_greenlet = {} self.next_timers = [] self.observers = {} self.observer_modes = { @@ -164,8 +167,14 @@ class RunLoop(object): def add_timer(self, timer): scheduled_time = self.clock() + timer.seconds self._add_absolute_timer(scheduled_time, timer) + current_greenlet = greenlet.getcurrent() + if current_greenlet not in self.timers_by_greenlet: + self.timers_by_greenlet[current_greenlet] = {} + self.timers_by_greenlet[current_greenlet][timer] = True + timer.greenlet = current_greenlet return scheduled_time - + + def prepare_timers(self): ins = bisect.insort_right t = self.timers @@ -192,9 +201,25 @@ class RunLoop(object): for i in xrange(last): timer = t[i][2] try: - timer() - except self.SYSTEM_EXCEPTIONS: - raise - except: - self.squelch_timer_exception(timer, sys.exc_info()) + try: + timer() + except self.SYSTEM_EXCEPTIONS: + raise + except: + self.squelch_timer_exception(timer, sys.exc_info()) + finally: + try: + del self.timers_by_greenlet[timer.greenlet][timer] + except KeyError: + pass del t[:last] + + def cancel_timers(self, greenlet): + for timer in self.timers_by_greenlet[greenlet]: + if timer.seconds: + ## If timer.seconds is 0, this isn't a timer, it's + ## actually eventlet's silly way of specifying whether + ## a coroutine is "ready to run" or not. + timer.cancel() + del self.timers_by_greenlet[greenlet] + diff --git a/eventlet/runloop_test.py b/eventlet/runloop_test.py index 90e7293..4827c32 100644 --- a/eventlet/runloop_test.py +++ b/eventlet/runloop_test.py @@ -1,5 +1,5 @@ """\ -@file test_runloop.py +@file runloop_test.py @author Donovan Preston Copyright (c) 2006-2007, Linden Research, Inc. 
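The runloop change above keeps a per-greenlet registry of scheduled timers so that cancel_timers() can sweep up any timers a pooled coroutine left behind once its work finished. A simplified standalone sketch of that bookkeeping (not the eventlet runloop itself; class names here are illustrative only):

    class TimerSketch(object):
        def __init__(self):
            self.cancelled = False
        def cancel(self):
            self.cancelled = True

    class TimerRegistrySketch(object):
        def __init__(self):
            self.timers_by_greenlet = {}

        def add_timer(self, owner, timer):
            # record which greenlet scheduled the timer
            self.timers_by_greenlet.setdefault(owner, {})[timer] = True

        def cancel_timers(self, owner):
            # cancel everything the finished greenlet still has pending
            for timer in self.timers_by_greenlet.pop(owner, {}):
                timer.cancel()
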
diff --git a/eventlet/timer.py b/eventlet/timer.py index 2613a61..f261b7d 100644 --- a/eventlet/timer.py +++ b/eventlet/timer.py @@ -25,7 +25,7 @@ THE SOFTWARE. from eventlet.api import get_hub class Timer(object): - __slots__ = ['seconds', 'tpl', 'called', 'cancelled', 'scheduled_time'] + __slots__ = ['seconds', 'tpl', 'called', 'cancelled', 'scheduled_time', 'greenlet'] def __init__(self, seconds, cb, *args, **kw): """Create a timer. seconds: The minimum number of seconds to wait before calling diff --git a/eventlet/timer_test.py b/eventlet/timer_test.py index f9a1e22..496a884 100644 --- a/eventlet/timer_test.py +++ b/eventlet/timer_test.py @@ -1,5 +1,5 @@ """\ -@file test_timer.py +@file timer_test.py @author Donovan Preston Copyright (c) 2006-2007, Linden Research, Inc. diff --git a/eventlet/wrappedfd.py b/eventlet/wrappedfd.py index 74811a2..2adf37a 100644 --- a/eventlet/wrappedfd.py +++ b/eventlet/wrappedfd.py @@ -212,6 +212,7 @@ class wrapped_fd(object): break buf += d chunk, self.recvbuffer = buf[:size], buf[size:] + return chunk def readline(self, size=None): return self.readuntil(self.newlines, size=size) From dfc9065e1f510190f76969298a7cb89f621417da Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Thu, 13 Dec 2007 18:20:22 -0500 Subject: [PATCH 47/79] [svn r69] Make sure that loaders and dumpers are used everywhere. Don't wrap a loader with make_safe_loader more than once. --- eventlet/httpc.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 3567830..7744259 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -474,7 +474,7 @@ class HttpSuite(object): if self.loader is not None: try: - body = self.loader(body) + body = make_safe_loader(self.loader(body)) except KeyboardInterrupt: raise except Exception, e: @@ -504,6 +504,7 @@ class HttpSuite(object): def head_(self, url, headers=None, use_proxy=False, ok=None, aux=None): return self.request_(_Params(url, 'HEAD', headers=headers, + loader=self.loader, dumper=self.dumper, use_proxy=use_proxy, ok=ok, aux=aux)) def head(self, *args, **kwargs): @@ -514,7 +515,7 @@ class HttpSuite(object): headers = {} headers['accept'] = self.fallback_content_type+';q=1,*/*;q=0' return self.request_(_Params(url, 'GET', headers=headers, - loader=make_safe_loader(self.loader), + loader=self.loader, dumper=self.dumper, use_proxy=use_proxy, ok=ok, aux=aux)) def get(self, *args, **kwargs): @@ -531,13 +532,15 @@ class HttpSuite(object): headers['content-type'] = content_type headers['accept'] = headers['content-type']+';q=1,*/*;q=0' return self.request_(_Params(url, 'PUT', body=data, headers=headers, + loader=self.loader, dumper=self.dumper, ok=ok, aux=aux)) def put(self, *args, **kwargs): return self.put_(*args, **kwargs)[-1] def delete_(self, url, ok=None, aux=None): - return self.request_(_Params(url, 'DELETE', ok=ok, aux=aux)) + return self.request_(_Params(url, 'DELETE', loader=self.loader, + dumper=self.dumper, ok=ok, aux=aux)) def delete(self, *args, **kwargs): return self.delete_(*args, **kwargs)[-1] @@ -553,8 +556,8 @@ class HttpSuite(object): headers['content-type'] = content_type headers['accept'] = headers['content-type']+';q=1,*/*;q=0' return self.request_(_Params(url, 'POST', body=data, - headers=headers, dumper=self.dumper, - loader=self.loader, ok=ok, aux=aux)) + headers=headers, loader=self.loader, + dumper=self.dumper, ok=ok, aux=aux)) def post(self, *args, **kwargs): return self.post_(*args, **kwargs)[-1] From c49670da8b9bfef21b3f8964546ad17777303e6e 
Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Thu, 13 Dec 2007 19:34:51 -0500 Subject: [PATCH 48/79] [svn r70] Handle status code 504 --- eventlet/httpc.py | 9 +++++++++ eventlet/httpc_test.py | 27 +++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 7744259..bec63a3 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -340,11 +340,19 @@ class Gone(ConnectionError): """ 410 Gone """ pass + class ServiceUnavailable(Retriable): """ 503 Service Unavailable """ def url(self): return self.params._delegate.url + +class GatewayTimeout(Retriable): + """ 504 Gateway Timeout """ + def url(self): + return self.params._delegate.url + + class InternalServerError(ConnectionError): """ 500 Internal Server Error """ def __repr__(self): @@ -382,6 +390,7 @@ status_to_error_map = { 410: Gone, 500: InternalServerError, 503: ServiceUnavailable, + 504: GatewayTimeout, } scheme_to_factory_map = { diff --git a/eventlet/httpc_test.py b/eventlet/httpc_test.py index c3f0c97..b0e14f0 100644 --- a/eventlet/httpc_test.py +++ b/eventlet/httpc_test.py @@ -336,6 +336,13 @@ class Site500(BasicSite): req.response(500, body="screw you world") return + +class Site500(BasicSite): + def handle_request(self, req): + req.response(500, body="screw you world") + return + + class TestHttpc500(TestBase, tests.TestCase): site_class = Site500 @@ -351,7 +358,27 @@ class TestHttpc500(TestBase, tests.TestCase): self.assertEquals(e.params.response_body, data) self.assert_(str(e).count(data)) self.assert_(repr(e).count(data)) + + +class Site504(BasicSite): + def handle_request(self, req): + req.response(504, body="screw you world") + +class TestHttpc504(TestBase, tests.TestCase): + site_class = Site504 + + def base_url(self): + return 'http://localhost:31337/' + + def test_post(self): + # Simply ensure that a 504 status code results in a + # GatewayTimeout. Don't bother retrying. + data = 'hello world' + self.assertRaises(httpc.GatewayTimeout, + lambda: httpc.post(self.base_url(), data=data)) + + class TestHttpTime(tests.TestCase): rfc1123_time = 'Sun, 06 Nov 1994 08:49:37 GMT' rfc850_time = 'Sunday, 06-Nov-94 08:49:37 GMT' From afd96c15b899d74a0ff746d524183b84b90801b3 Mon Sep 17 00:00:00 2001 From: "donovan.linden" Date: Fri, 14 Dec 2007 13:48:11 -0500 Subject: [PATCH 49/79] [svn r71] The fix for a problem raised by 'mark' in [sldev] help with beta-1; we never noticed this because every thread we started had a timer created in it. --- eventlet/runloop.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/eventlet/runloop.py b/eventlet/runloop.py index eedd76d..fb93549 100644 --- a/eventlet/runloop.py +++ b/eventlet/runloop.py @@ -215,6 +215,8 @@ class RunLoop(object): del t[:last] def cancel_timers(self, greenlet): + if greenlet not in self.timers_by_greenlet: + return for timer in self.timers_by_greenlet[greenlet]: if timer.seconds: ## If timer.seconds is 0, this isn't a timer, it's From 23ea81dabac183cea66859453b111ac98ed41e65 Mon Sep 17 00:00:00 2001 From: "seeping.blister" Date: Thu, 3 Jan 2008 20:12:30 -0500 Subject: [PATCH 50/79] [svn r72] Changed the httpd.server() method to allow passing in the "serv" as a keyword argument, so that code that wants to get access to the "serv" object (e.g. to replay requests as CHTTP wants to do) can do so easily. 
This should be a trivial change, with no possibilities of error *grin* --- eventlet/httpd.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index 5a78ec4..28dff04 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -531,9 +531,10 @@ class Server(BaseHTTPServer.HTTPServer): '%s - - [%s] "%s" %s %s %.6f\n' % args) -def server(sock, site, log=None, max_size=512): +def server(sock, site, log=None, max_size=512,serv=None): pool = coros.CoroutinePool(max_size=max_size) - serv = Server(sock, sock.getsockname(), site, log) + if serv is None: + serv = Server(sock, sock.getsockname(), site, log) try: serv.log.write("httpd starting up on %s\n" % (sock.getsockname(), )) while True: From 5ce2b2eed4f809a0f7e1348f3b8a067d7a5d0a0b Mon Sep 17 00:00:00 2001 From: "seeping.blister" Date: Thu, 3 Jan 2008 21:53:54 -0500 Subject: [PATCH 51/79] [svn r73] a native-thread pool for eventlet. This allows non-eventlet Python code to be run on Python native threads, so things like databsae connectors can run concurrently with Python code. --- eventlet/tpool.py | 113 +++++++++++++++++++++++++++++++++++++++++ eventlet/tpool_test.py | 60 ++++++++++++++++++++++ 2 files changed, 173 insertions(+) create mode 100644 eventlet/tpool.py create mode 100644 eventlet/tpool_test.py diff --git a/eventlet/tpool.py b/eventlet/tpool.py new file mode 100644 index 0000000..0f0719b --- /dev/null +++ b/eventlet/tpool.py @@ -0,0 +1,113 @@ +"""\ +@file tpool.py + +Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007, IBM Corp. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import os, socket, time, threading +import Queue + +from sys import stdout +from Queue import Empty, Queue + +from eventlet import api, coros, httpc, httpd, util, wrappedfd + +_rpipe, _wpipe = os.pipe() +_rfile = os.fdopen(_rpipe,"r",0) +_wrap_rfile = wrappedfd.wrapped_file(_rfile) +util.set_nonblocking(_rfile) + +def _signal_t2e(): + nwritten = os.write(_wpipe,' ') + +_reqq = Queue(maxsize=-1) +_rspq = Queue(maxsize=-1) + +def trampoline(): + global _reqq, _rspq + while(True): + _c = _wrap_rfile.read(1) + assert(_c != "") + while not _rspq.empty(): + try: + (e,rv) = _rspq.get(block=False) + e.send(rv) + except Empty: + pass + +def esend(meth,*args, **kwargs): + global _reqq, _rspq + e = coros.event() + _reqq.put((e,meth,args,kwargs)) + return e + +def tworker(): + global _reqq, _rspq + while(True): + (e,meth,args,kwargs) = _reqq.get() + rv = None + try: + rv = meth(*args,**kwargs) + except Exception,exn: + rv = exn + _rspq.put((e,rv)) + _signal_t2e() + +def erecv(e): + rv = e.wait() + if isinstance(rv,Exception): + raise rv + return rv + +def erpc(meth,*args, **kwargs): + e = esend(meth,*args,**kwargs) + rv = erecv(e) + return rv + +class Proxy(object): + """ a simple proxy-wrapper of any object that comes with a methods-only interface, + in order to forward every method invocation onto a thread in the native-thread pool. 
+ A key restriction is that the object's methods cannot call into eventlets, since the + eventlet dispatcher runs on a different native thread. This is for running native-threaded + code only. """ + def __init__(self, obj,autowrap=()): + self._obj = obj + self._autowrap = autowrap + + def __getattr__(self,attr_name): + f = getattr(self._obj,attr_name) + if not callable(f): + return f + def doit(*args, **kwargs): + rv = erpc(f,*args,**kwargs) + if type(rv) in self._autowrap: + return Proxy(rv) + else: + return rv + return doit + +_nthreads = 20 +_threads = {} +def setup(): + global _threads + for i in range(0,_nthreads): + _threads[i] = threading.Thread(target=tworker) + _threads[i].setDaemon(True) + _threads[i].start() + + api.spawn(trampoline) + +setup() diff --git a/eventlet/tpool_test.py b/eventlet/tpool_test.py new file mode 100644 index 0000000..61501bd --- /dev/null +++ b/eventlet/tpool_test.py @@ -0,0 +1,60 @@ +"""\ +@file tpool_test.py + +Copyright (c) 2007, Linden Research, Inc. +Copyright (c) 2007, IBM Corp. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import os, socket, time, threading +from eventlet import coros, api, tpool + +from eventlet.tpool import erpc +from sys import stdout + +import random +r = random.WichmannHill() + +class yadda(object): + def __init__(self): + pass + + def foo(self,when,n=None): + assert(n is not None) + print "foo: %s, %s" % (when,n) + time.sleep(r.random()) + return n + +def sender_loop(pfx): + n = 0 + obj = tpool.Proxy(yadda()) + while True: + api.sleep(0) + now = time.time() + print "%s: send (%s,%s)" % (pfx,now,n) + rv = obj.foo(now,n=n) + print "%s: recv %s" % (pfx, rv) + assert(n == rv) + api.sleep(0.5) + n += 1 + +def test1(): + pool = coros.CoroutinePool(max_size=10) + waiters = [] + for i in range(0,9): + waiters.append(pool.execute(sender_loop,i)) + for waiter in waiters: + waiter.wait() + +test1() From 88fb5ecc1202a2f97c5ab4b709d05ed6a12f2708 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Fri, 4 Jan 2008 22:44:40 -0500 Subject: [PATCH 52/79] [svn r74] make a subclass of db_pool that uses tpool.Proxy instead of saranwrap. Unit tests for all possible combinations thereof. --- eventlet/db_pool.py | 62 +++++++++++++++++++++++++++------------- eventlet/db_pool_test.py | 21 +++++++++++--- 2 files changed, 59 insertions(+), 24 deletions(-) diff --git a/eventlet/db_pool.py b/eventlet/db_pool.py index f8b437c..5769412 100644 --- a/eventlet/db_pool.py +++ b/eventlet/db_pool.py @@ -33,12 +33,15 @@ class DatabaseConnector(object): """\ @brief This is an object which will maintain a collection of database connection pools keyed on host,databasename""" - def __init__(self, module, credentials, min_size = 0, max_size = 4, *args, **kwargs): + def __init__(self, module, credentials, min_size = 0, max_size = 4, conn_pool=None, *args, **kwargs): """\ @brief constructor @param min_size the minimum size of a child pool. 
@param max_size the maximum size of a child pool.""" assert(module) + self._conn_pool_class = conn_pool + if self._conn_pool_class is None: + self._conn_pool_class = ConnectionPool self._module = module self._min_size = min_size self._max_size = max_size @@ -60,33 +63,35 @@ connection pools keyed on host,databasename""" new_kwargs['db'] = dbname new_kwargs['host'] = host new_kwargs.update(self.credentials_for(host)) - dbpool = ConnectionPool(self._module, self._min_size, self._max_size, *self._args, **new_kwargs) + dbpool = self._conn_pool_class(self._module, self._min_size, self._max_size, *self._args, **new_kwargs) self._databases[key] = dbpool return self._databases[key] - -class ConnectionPool(Pool): - """A pool which gives out saranwrapped database connections from a pool - """ - def __init__(self, module, min_size = 0, max_size = 4, *args, **kwargs): - assert(module) - self._module = module +class BaseConnectionPool(Pool): + # *TODO: we need to expire and close connections if they've been + # idle for a while, so that system-wide connection count doesn't + # monotonically increase forever + def __init__(self, db_module, min_size = 0, max_size = 4, *args, **kwargs): + assert(db_module) + self._db_module = db_module self._args = args self._kwargs = kwargs - super(ConnectionPool, self).__init__(min_size, max_size) + super(BaseConnectionPool, self).__init__(min_size, max_size) - def create(self): - return saranwrap.wrap(self._module).connect(*self._args, **self._kwargs) + def get(self): + # wrap the connection for easier use + conn = super(BaseConnectionPool, self).get() + return PooledConnectionWrapper(conn, self) def put(self, conn): # rollback any uncommitted changes, so that the next client - # has a clean slate. This also pokes the process to see if + # has a clean slate. This also pokes the connection to see if # it's dead or None try: conn.rollback() except AttributeError, e: - # this means it's already been destroyed + # this means it's already been destroyed, so we don't need to print anything conn = None except: # we don't care what the exception was, we just know the @@ -104,14 +109,32 @@ class ConnectionPool(Pool): conn = None if conn is not None: - super(ConnectionPool, self).put(conn) + super(BaseConnectionPool, self).put(conn) else: self.current_size -= 1 + - def get(self): - # wrap the connection for easier use - conn = super(ConnectionPool, self).get() - return PooledConnectionWrapper(conn, self) +class SaranwrappedConnectionPool(BaseConnectionPool): + """A pool which gives out saranwrapped database connections from a pool + """ + def create(self): + return saranwrap.wrap(self._db_module).connect(*self._args, **self._kwargs) + +class TpooledConnectionPool(BaseConnectionPool): + """A pool which gives out tpool.Proxy-based database connections from a pool. 
+ """ + def create(self): + from eventlet import tpool + try: + # *FIX: this is a huge hack that will probably only work for MySQLdb + autowrap = (self._db_module.cursors.DictCursor,) + except: + autowrap = () + return tpool.Proxy(self._db_module.connect(*self._args, **self._kwargs), + autowrap=autowrap) + +# default connection pool is the tpool one +ConnectionPool = TpooledConnectionPool class GenericConnectionWrapper(object): @@ -188,4 +211,3 @@ class PooledConnectionWrapper(GenericConnectionWrapper): def __del__(self): self.close() - diff --git a/eventlet/db_pool_test.py b/eventlet/db_pool_test.py index 628beaa..499b723 100644 --- a/eventlet/db_pool_test.py +++ b/eventlet/db_pool_test.py @@ -79,9 +79,6 @@ class TestDBConnectionPool(DBTester): self.pool.put(self.connection) super(TestDBConnectionPool, self).tearDown() - def create_pool(self, max_items = 1): - return db_pool.ConnectionPool(self._dbmodule, 0, max_items, **self._auth) - def assert_cursor_works(self, cursor): cursor.execute("show full processlist") rows = cursor.fetchall() @@ -253,7 +250,17 @@ class TestDBConnectionPool(DBTester): results.sort() self.assertEqual([1, 2], results) -class TestMysqlConnectionPool(TestDBConnectionPool, unittest.TestCase): + +class TestTpoolConnectionPool(TestDBConnectionPool): + def create_pool(self, max_items = 1): + return db_pool.TpooledConnectionPool(self._dbmodule, 0, max_items, **self._auth) + + +class TestSaranwrapConnectionPool(TestDBConnectionPool): + def create_pool(self, max_items = 1): + return db_pool.SaranwrappedConnectionPool(self._dbmodule, 0, max_items, **self._auth) + +class TestMysqlConnectionPool(object): def setUp(self): import MySQLdb self._dbmodule = MySQLdb @@ -286,6 +293,12 @@ class TestMysqlConnectionPool(TestDBConnectionPool, unittest.TestCase): db.close() del db +class TestMysqlTpool(TestMysqlConnectionPool, TestTpoolConnectionPool, unittest.TestCase): + pass + +class TestMysqlSaranwrap(TestMysqlConnectionPool, TestSaranwrapConnectionPool, unittest.TestCase): + pass + if __name__ == '__main__': unittest.main() From e82ea35461558ccbdbc28b19ec8e6f3a8818efa6 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Wed, 9 Jan 2008 19:10:39 -0500 Subject: [PATCH 53/79] [svn r75] Ran reindent.py over eventlet, it caught some places where the indentation was incorrect. Also change tpool_test to complete in finite time. --- eventlet/jsonhttp.py | 6 +++--- eventlet/pools.py | 8 ++++---- eventlet/saranwrap.py | 8 ++++---- eventlet/tpool_test.py | 4 ++-- eventlet/wrappedfd.py | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/eventlet/jsonhttp.py b/eventlet/jsonhttp.py index 7c84af5..2fc0e54 100644 --- a/eventlet/jsonhttp.py +++ b/eventlet/jsonhttp.py @@ -28,9 +28,9 @@ import simplejson def safe_load(what): - if not what: - return None - return simplejson.loads(what) + if not what: + return None + return simplejson.loads(what) suite = httpc.HttpSuite(simplejson.dumps, safe_load, 'application/json') diff --git a/eventlet/pools.py b/eventlet/pools.py index 7880a79..4f8ccb1 100644 --- a/eventlet/pools.py +++ b/eventlet/pools.py @@ -161,10 +161,10 @@ class ConnectionPool(Pool): together to do HTTP keepalive sockets without errors. 
""" def __init__(self, proto, netloc, use_proxy, min_size=0, max_size=4): - self.proto = proto - self.netloc = netloc - self.use_proxy = use_proxy - Pool.__init__(self, min_size, max_size) + self.proto = proto + self.netloc = netloc + self.use_proxy = use_proxy + Pool.__init__(self, min_size, max_size) def create(self): return httpc.make_connection(self.proto, self.netloc, self.use_proxy) diff --git a/eventlet/saranwrap.py b/eventlet/saranwrap.py index 3b081a5..14e0a37 100644 --- a/eventlet/saranwrap.py +++ b/eventlet/saranwrap.py @@ -309,11 +309,11 @@ not supported, so you have to know what has been exported. _dead_list = my_cp._dead_list for dead_object in _dead_list.copy(): - request = Request('del', {'id':dead_object}) + request = Request('del', {'id':dead_object}) - my_cp.make_request(request) - _dead_list.remove(dead_object) - + my_cp.make_request(request) + _dead_list.remove(dead_object) + # Pass all public attributes across to find out if it is # callable or a simple attribute. request = Request('getattr', {'id':my_id, 'attribute':attribute}) diff --git a/eventlet/tpool_test.py b/eventlet/tpool_test.py index 61501bd..0af2e23 100644 --- a/eventlet/tpool_test.py +++ b/eventlet/tpool_test.py @@ -39,14 +39,14 @@ class yadda(object): def sender_loop(pfx): n = 0 obj = tpool.Proxy(yadda()) - while True: + while n < 10: api.sleep(0) now = time.time() print "%s: send (%s,%s)" % (pfx,now,n) rv = obj.foo(now,n=n) print "%s: recv %s" % (pfx, rv) assert(n == rv) - api.sleep(0.5) + api.sleep(0) n += 1 def test1(): diff --git a/eventlet/wrappedfd.py b/eventlet/wrappedfd.py index 2adf37a..a5ab0aa 100644 --- a/eventlet/wrappedfd.py +++ b/eventlet/wrappedfd.py @@ -244,7 +244,7 @@ class wrapped_fd(object): def read(self, size=None): if size is not None and not isinstance(size, (int, long)): - raise TypeError('Expecting an int or long for size, got %s: %s' % (type(size), repr(size))) + raise TypeError('Expecting an int or long for size, got %s: %s' % (type(size), repr(size))) buf, self.recvbuffer = self.recvbuffer, '' lst = [buf] if size is None: From 9f28a79ad2c7156b4478e163afaf5531e1b5e6c1 Mon Sep 17 00:00:00 2001 From: "tess.linden" Date: Fri, 18 Jan 2008 20:34:49 -0500 Subject: [PATCH 54/79] [svn r77] Merging up beta-1 and removing the branch from svn svn merge -r75:76 https://svn.secondlife.com/svn/eventlet/branches/beta-1 svn delete https://svn.secondlife.com/svn/eventlet/branches/beta-1 --- eventlet/httpc.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index bec63a3..938e8ae 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -259,14 +259,15 @@ class ConnectionError(Exception): class UnparseableResponse(ConnectionError): """Raised when a loader cannot parse the response from the server.""" - def __init__(self, content_type, response): + def __init__(self, content_type, response, url): self.content_type = content_type self.response = response + self.url = url Exception.__init__(self) def __repr__(self): - return "UnparseableResponse(%r, %r)" % ( - self.content_type, self.response) + return "Could not parse the data at the URL %r of content-type %r\nData:\n%r)" % ( + self.url, self.content_type, self.response) __str__ = __repr__ @@ -487,7 +488,7 @@ class HttpSuite(object): except KeyboardInterrupt: raise except Exception, e: - raise UnparseableResponse(self.loader, body) + raise UnparseableResponse(self.loader, body, params.url) return response.status, response.msg, body From e7d02382860127fa3aee5a409b8b0829d8fb8d6a Mon Sep 
17 00:00:00 2001 From: "which.linden" Date: Sun, 20 Jan 2008 16:57:05 -0500 Subject: [PATCH 55/79] [svn r78] Added actor class (a free-running message receiver as discussed here: http://lists.secondlife.com/pipermail/chttpdev/2007-December/000042.html) and some tests. --- eventlet/coros.py | 76 ++++++++++++++++++++++++++++++++++++++++++ eventlet/coros_test.py | 63 ++++++++++++++++++++++++++++++++++ 2 files changed, 139 insertions(+) diff --git a/eventlet/coros.py b/eventlet/coros.py index 86b77ac..c8784be 100644 --- a/eventlet/coros.py +++ b/eventlet/coros.py @@ -22,6 +22,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +import collections import time import traceback @@ -174,3 +175,78 @@ class pipe(object): buf, self._buffer = self._buffer[:num], self._buffer[num:] return buf + +class Actor(object): + """ A free-running coroutine that accepts and processes messages. + + Kind of the equivalent of an Erlang process, really. It processes + a queue of messages in the order that they were sent. You must + subclass this and implement your own version of receive(). + + The actor's reference count will never drop to zero while the + coroutine exists; if you lose all references to the actor object + it will never be freed. + """ + def __init__(self): + """ Constructs an Actor, kicking off a new coroutine to process the messages. """ + self._mailbox = collections.deque() + self._event = event() + self._killer = api.spawn(self.run_forever) + + def run_forever(self): + """ Loops forever, continually checking the mailbox. """ + while True: + if not self._mailbox: + self._event.wait() + self._event.reset() + else: + # leave the message in the mailbox until after it's + # been processed so the event doesn't get triggered + # while in the received method + self.received(self._mailbox[0]) + self._mailbox.popleft() + + def cast(self, message): + """ Send a message to the actor. + + If the actor is busy, the message will be enqueued for later + consumption. There is no return value. + + >>> a = Actor() + >>> a.received = lambda msg: msg + >>> a.cast("hello") + """ + self._mailbox.append(message) + # if this is the only message, the coro could be waiting + if len(self._mailbox) == 1: + self._event.send() + + def received(self, message): + """ Called to process each incoming message. + + The default implementation just raises an exception, so + replace it with something useful! + + >>> class Greeter(Actor): + ... def received(self, message): + ... print "received", message + ... + >>> a = Greeter() + >>> a.cast("message 1") + >>> api.sleep(0) # need to explicitly yield to cause the actor to run + received message 1 + >>> a.cast("message 2") + >>> a.cast("message 3") + >>> api.sleep(0) + received message 2 + received message 3 + """ + raise NotImplementedError() + +def _test(): + print "Running doctests. There will be no further output if they succeed." 
+ import doctest + doctest.testmod() + +if __name__ == "__main__": + _test() diff --git a/eventlet/coros_test.py b/eventlet/coros_test.py index ee15dd0..0675c3e 100644 --- a/eventlet/coros_test.py +++ b/eventlet/coros_test.py @@ -152,5 +152,68 @@ class TestCoroutinePool(tests.TestCase): api.sleep(0) self.assertEquals(t.cancelled, True) +class IncrActor(coros.Actor): + def received(self, message): + self.value = getattr(self, 'value', 0) + 1 + +class TestActor(tests.TestCase): + mode = 'static' + def setUp(self): + # raise an exception if we're waiting forever + self._cancel_timeout = api.exc_after(1, RuntimeError()) + self.actor = IncrActor() + + def tearDown(self): + self._cancel_timeout.cancel() + api.kill(self.actor._killer) + + def test_cast(self): + self.actor.cast(1) + api.sleep(0) + self.assertEqual(self.actor.value, 1) + self.actor.cast(1) + api.sleep(0) + self.assertEqual(self.actor.value, 2) + + def test_cast_multi_1(self): + # make sure that both messages make it in there + self.actor.cast(1) + self.actor.cast(1) + api.sleep(0) + self.assertEqual(self.actor.value, 2) + + def test_cast_multi_2(self): + # the actor goes through a slightly different code path if it + # is forced to enter its event loop prior to any cast()s + api.sleep(0) + self.test_cast_multi_1() + + def test_sleeping_during_received(self): + # ensure that even if the received method cooperatively + # yields, eventually all messages are delivered + msgs = [] + waiters = [] + def received(message): + evt = coros.event() + waiters.append(evt) + api.sleep(0) + msgs.append(message) + evt.send() + self.actor.received = received + + self.actor.cast(1) + api.sleep(0) + self.actor.cast(2) + self.actor.cast(3) + api.sleep(0) + self.actor.cast(4) + self.actor.cast(5) + for evt in waiters: + evt.wait() + self.assertEqual(msgs, [1,2,3,4,5]) + + + + if __name__ == '__main__': tests.main() From da1b6144554c70171f7782d18c43ea7cdaaad923 Mon Sep 17 00:00:00 2001 From: "sardonyx.linden" Date: Mon, 21 Jan 2008 19:40:16 -0500 Subject: [PATCH 56/79] [svn r79] Fix FileScheme's exception raising code. --- eventlet/httpc.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 938e8ae..da3975a 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -174,9 +174,7 @@ class FileScheme(object): def raise_connection_error(self, klass=None): if klass is None: klass=ConnectionError - raise klass( - self.method, self.host, self.port, - self.path, self.status, self.reason, '') + raise klass(_Params('file://' + self.path, self.method)) def close(self): """We're challenged here, and read the whole file rather than From 9ec1cab84258f8d338977e2ee37e6a4e7e86ffdb Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Wed, 23 Jan 2008 19:14:38 -0500 Subject: [PATCH 57/79] [svn r80] DEV-9309: Support python object copy through saranwrap. Also tweaked comment on event.send to better reflect actual semantics. --- eventlet/coros.py | 4 +- eventlet/saranwrap.py | 84 +++++++++++++++++++++++--------------- eventlet/saranwrap_test.py | 13 ++++++ 3 files changed, 65 insertions(+), 36 deletions(-) diff --git a/eventlet/coros.py b/eventlet/coros.py index c8784be..a54b22e 100644 --- a/eventlet/coros.py +++ b/eventlet/coros.py @@ -83,8 +83,8 @@ class event(object): 0, greenlib.switch, waiter, None, Cancelled()) def send(self, result=None, exc=None): - """Resume all previous and further - calls to wait() with result. 
+ """Makes arrangements for the waiters to be woken with the + result and then returns immediately to the parent. """ assert self._result is NOT_USED self._result = result diff --git a/eventlet/saranwrap.py b/eventlet/saranwrap.py index 14e0a37..b3d6744 100644 --- a/eventlet/saranwrap.py +++ b/eventlet/saranwrap.py @@ -397,6 +397,19 @@ not need to deal with this class directly.""" # see description for __repr__, len(obj) is the same. return self.__len__() + def __deepcopy__(self, memo=None): + """Copies the entire external object and returns its + value. Will only work if the remote object is pickleable.""" + my_cp = self.__local_dict['_cp'] + my_id = self.__local_dict['_id'] + request = Request('copy', {'id':my_id}) + return my_cp.make_request(request) + + # since the remote object is being serialized whole anyway, + # there's no semantic difference between copy and deepcopy + __copy__ = __deepcopy__ + + def proxied_type(self): if type(self) is not ObjectProxy: return type(self) @@ -444,60 +457,60 @@ when the id is None.""" self._next_id = 1 self._objects = {} - def handle_status(self, object, req): + def handle_status(self, obj, req): return { 'object_count':len(self._objects), 'next_id':self._next_id, 'pid':os.getpid()} - def handle_getattr(self, object, req): + def handle_getattr(self, obj, req): try: - return getattr(object, req['attribute']) + return getattr(obj, req['attribute']) except AttributeError, e: - if hasattr(object, "__getitem__"): - return object[req['attribute']] + if hasattr(obj, "__getitem__"): + return obj[req['attribute']] else: raise e #_log('getattr: %s' % str(response)) - def handle_setattr(self, object, req): + def handle_setattr(self, obj, req): try: - return setattr(object, req['attribute'], req['value']) + return setattr(obj, req['attribute'], req['value']) except AttributeError, e: - if hasattr(object, "__setitem__"): - return object.__setitem__(req['attribute'], req['value']) + if hasattr(obj, "__setitem__"): + return obj.__setitem__(req['attribute'], req['value']) else: raise e - def handle_getitem(self, object, req): - return object[req['key']] + def handle_getitem(self, obj, req): + return obj[req['key']] - def handle_setitem(self, object, req): - object[req['key']] = req['value'] + def handle_setitem(self, obj, req): + obj[req['key']] = req['value'] return None # *TODO figure out what the actual return value of __setitem__ should be - def handle_eq(self, object, req): - #_log("__eq__ %s %s" % (object, req)) + def handle_eq(self, obj, req): + #_log("__eq__ %s %s" % (obj, req)) rhs = None try: rhs = self._objects[req['rhs']] except KeyError, e: return False - return (object == rhs) + return (obj == rhs) - def handle_call(self, object, req): + def handle_call(self, obj, req): #_log("calling %s " % (req['name'])) try: - fn = getattr(object, req['name']) + fn = getattr(obj, req['name']) except AttributeError, e: - if hasattr(object, "__setitem__"): - fn = object[req['name']] + if hasattr(obj, "__setitem__"): + fn = obj[req['name']] else: raise e return fn(*req['args'],**req['kwargs']) - def handle_del(self, object, req): + def handle_del(self, obj, req): id = req['id'] _log("del %s from %s" % (id, self._objects)) @@ -505,11 +518,14 @@ when the id is None.""" del self._objects[id] return None - def handle_type(self, object, req): - return type(object) + def handle_type(self, obj, req): + return type(obj) - def handle_nonzero(self, object, req): - return bool(object) + def handle_nonzero(self, obj, req): + return bool(obj) + + def handle_copy(self, obj, req): + 
return obj def loop(self): """@brief Loop forever and respond to all requests.""" @@ -524,20 +540,20 @@ when the id is None.""" _log("request: %s (%s)" % (request, self._objects)) req = request id = None - object = None + obj = None try: id = req['id'] if id: id = int(id) - object = self._objects[id] - #_log("id, object: %d %s" % (id, object)) + obj = self._objects[id] + #_log("id, object: %d %s" % (id, obj)) except Exception, e: #_log("Exception %s" % str(e)) pass - if object is None or id is None: + if obj is None or id is None: id = None - object = self._export - #_log("found object %s" % str(object)) + obj = self._export + #_log("found object %s" % str(obj)) # Handle the request via a method with a special name on the server handler_name = 'handle_%s' % request.action() @@ -547,11 +563,11 @@ when the id is None.""" except AttributeError: raise BadRequest, request.action() - response = handler(object, request) + response = handler(obj, request) # figure out what to do with the response, and respond # apprpriately. - if request.action() in ['status', 'type']: + if request.action() in ['status', 'type', 'copy']: # have to handle these specially since we want to # pickle up the actual value and not return a proxy self.respond(['value', response]) @@ -627,7 +643,7 @@ def main(): description="Simple saranwrap.Server wrapper") parser.add_option( '-c', '--child', default=False, action='store_true', - help='Wrap an object serialed via setattr.') + help='Wrap an object serialized via setattr.') parser.add_option( '-m', '--module', type='string', dest='module', default=None, help='a module to load and export.') diff --git a/eventlet/saranwrap_test.py b/eventlet/saranwrap_test.py index 061699c..df0878d 100644 --- a/eventlet/saranwrap_test.py +++ b/eventlet/saranwrap_test.py @@ -281,6 +281,19 @@ sys_path = sys.path""") for waiter in waiters: waiter.wait() + def test_copy(self): + import copy + compound_object = {'a':[1,2,3]} + prox = saranwrap.wrap(compound_object) + def make_assertions(copied): + self.assert_(isinstance(copied, dict)) + self.assert_(isinstance(copied['a'], list)) + self.assertEquals(copied, compound_object) + self.assertNotEqual(id(compound_object), id(copied)) + + make_assertions(copy.copy(prox)) + make_assertions(copy.deepcopy(prox)) + def test_list_of_functions(self): return # this test is known to fail, we can implement it sometime in the future if we wish from eventlet import saranwrap_test From 35e7bf3312035e13c98f316d7436b5bd4a995de8 Mon Sep 17 00:00:00 2001 From: "seeping.blister" Date: Fri, 25 Jan 2008 17:48:37 -0500 Subject: [PATCH 58/79] [svn r81] Added a RawConnectionPool, which can be used to get -pooled- DB conns with neither saranwrap nor tpool wrappering. --- eventlet/db_pool.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/eventlet/db_pool.py b/eventlet/db_pool.py index 5769412..d44fd46 100644 --- a/eventlet/db_pool.py +++ b/eventlet/db_pool.py @@ -133,6 +133,12 @@ class TpooledConnectionPool(BaseConnectionPool): return tpool.Proxy(self._db_module.connect(*self._args, **self._kwargs), autowrap=autowrap) +class RawConnectionPool(BaseConnectionPool): + """A pool which gives out plain database connections from a pool. 
+ """ + def create(self): + return self._db_module.connect(*self._args, **self._kwargs) + # default connection pool is the tpool one ConnectionPool = TpooledConnectionPool From 19e4c1169edc703c46c668b5a9838fcb1e18f927 Mon Sep 17 00:00:00 2001 From: "donovan.linden" Date: Mon, 28 Jan 2008 13:45:07 -0500 Subject: [PATCH 59/79] [svn r82] Add max_http_version to httpd.server to allow specifying either http 1.0 or 1.1 --- eventlet/httpd.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index 28dff04..a5e8760 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -39,6 +39,8 @@ from eventlet import api from eventlet import coros +DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1' + USE_ACCESS_LOG = True @@ -388,13 +390,13 @@ class Timeout(RuntimeError): pass class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): - protocol_version = "HTTP/1.1" def __init__(self, request, client_address, server): self.socket = self.request = self.rfile = self.wfile = request self.client_address = client_address self.server = server self._code = 200 self._message = 'OK' + self.protocol_version = server.max_http_version def set_response_code(self, request, code, message): self._code = code @@ -503,10 +505,11 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): class Server(BaseHTTPServer.HTTPServer): - def __init__(self, socket, address, site, log): + def __init__(self, socket, address, site, log, max_http_version=DEFAULT_MAX_HTTP_VERSION): self.socket = socket self.address = address self.site = site + self.max_http_version = max_http_version if log: self.log = log if hasattr(log, 'info'): @@ -531,10 +534,10 @@ class Server(BaseHTTPServer.HTTPServer): '%s - - [%s] "%s" %s %s %.6f\n' % args) -def server(sock, site, log=None, max_size=512,serv=None): +def server(sock, site, log=None, max_size=512, serv=None, max_http_version=DEFAULT_MAX_HTTP_VERSION): pool = coros.CoroutinePool(max_size=max_size) if serv is None: - serv = Server(sock, sock.getsockname(), site, log) + serv = Server(sock, sock.getsockname(), site, log, max_http_version=max_http_version) try: serv.log.write("httpd starting up on %s\n" % (sock.getsockname(), )) while True: From 6564c33a7e591236ae0fb33da254c50c9fac60d4 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 28 Jan 2008 17:21:28 -0500 Subject: [PATCH 60/79] [svn r83] Reflowed readme and put in references to eventletdev in the code. --- README | 38 +++++++++++++++++++++++++++++++++----- setup.py | 2 +- 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/README b/README index 928b161..18094ae 100644 --- a/README +++ b/README @@ -5,6 +5,11 @@ scalability by using non-blocking io while at the same time retaining high programmer usability by using coroutines to make the non-blocking io operations appear blocking at the source code level. +The wiki at http://wiki.secondlife.com/wiki/Eventlet is likely to be a +more current source of information than this README. Questions, +patches, and general discussion go to the eventlet mailing list: +https://lists.secondlife.com/cgi-bin/mailman/listinfo/eventletdev + == requirements === Eventlet runs on Python version 2.3 or greater, with the following dependenceis: @@ -15,8 +20,11 @@ Eventlet runs on Python version 2.3 or greater, with the following dependenceis: * Sorely lacking in documentation * Not enough test coverage -- the goal is 100%, but we are not there yet. 
-* Eventlet does not currently run on stackless using tasklets, though it is a goal to do so in the future. -* The SSL client does not properly connect to the SSL server, though both client and server interoperate with other SSL implementations (e.g. curl and apache). +* Eventlet does not currently run on stackless using tasklets, though +it is a goal to do so in the future. +* The SSL client does not properly connect to the SSL server, though +both client and server interoperate with other SSL implementations +(e.g. curl and apache). == getting started == @@ -28,11 +36,31 @@ Also, look at the examples in the examples directory. == eventlet history == -eventlet began life as Donovan Preston was talking to Bob Ippolito about coroutine-based non-blocking networking frameworks in Python. Most non-blocking frameworks require you to run the "main loop" in order to perform all network operations, but Donovan wondered if a library written using a trampolining style could get away with transparently running the main loop any time i/o was required, stopping the main loop once no more i/o was scheduled. Bob spent a few days during PyCon 2005 writing a proof-of-concept. He named it eventlet, after the coroutine implementation it used, [[greenlet]]. Donovan began using eventlet as a light-weight network library for his spare-time project Pavel, and also began writing some unittests. +eventlet began life as Donovan Preston was talking to Bob Ippolito +about coroutine-based non-blocking networking frameworks in +Python. Most non-blocking frameworks require you to run the "main +loop" in order to perform all network operations, but Donovan wondered +if a library written using a trampolining style could get away with +transparently running the main loop any time i/o was required, +stopping the main loop once no more i/o was scheduled. Bob spent a few +days during PyCon 2005 writing a proof-of-concept. He named it +eventlet, after the coroutine implementation it used, +[[greenlet]]. Donovan began using eventlet as a light-weight network +library for his spare-time project Pavel, and also began writing some +unittests. * http://svn.red-bean.com/bob/eventlet/trunk/ * http://soundfarmer.com/Pavel/trunk/ -When Donovan started at Linden Lab in May of 2006, he added eventlet as an svn external in the indra/lib/python directory, to be a dependency of the yet-to-be-named [[backbone]] project (at the time, it was named restserv). However, including eventlet as an svn external meant that any time the externally hosted project had hosting issues, Linden developers were not able to perform svn updates. Thus, the eventlet source was imported into the linden source tree at the same location, and became a fork. +When Donovan started at Linden Lab in May of 2006, he added eventlet +as an svn external in the indra/lib/python directory, to be a +dependency of the yet-to-be-named [[backbone]] project (at the time, +it was named restserv). However, including eventlet as an svn external +meant that any time the externally hosted project had hosting issues, +Linden developers were not able to perform svn updates. Thus, the +eventlet source was imported into the linden source tree at the same +location, and became a fork. -Bob Ippolito has ceased working on eventlet and has stated his desire for Linden to take it's fork forward to the open source world as "the" eventlet. +Bob Ippolito has ceased working on eventlet and has stated his desire +for Linden to take its fork forward to the open source world as "the" +eventlet. 
diff --git a/setup.py b/setup.py index 3a73e41..dd5999c 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ setup( version='0.2', description='Coroutine-based networking library', author='Linden Lab', - author_email='sldev@lists.secondlife.com', + author_email='eventletdev@lists.secondlife.com', url='http://wiki.secondlife.com/wiki/Eventlet', packages=['eventlet'], install_requires=['greenlet'], From 00fbec616a4def4190bca858945619e10cf4d5f6 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 4 Feb 2008 20:24:31 -0500 Subject: [PATCH 61/79] [svn r84] More documentation and doctests in coros.py. Turned tests.py into a utility that runs all eventlet tests that it can find. I'm much more likely to run all the tests if there's one command to do so. Refactored some of the tests slightly to play nicer with that style of test running. --- eventlet/coros.py | 168 +++++++++++++++++++++++++++++++++++++-- eventlet/db_pool_test.py | 17 ++-- eventlet/tests.py | 46 ++++++++++- eventlet/tpool_test.py | 22 ++--- 4 files changed, 229 insertions(+), 24 deletions(-) diff --git a/eventlet/coros.py b/eventlet/coros.py index a54b22e..6c0318c 100644 --- a/eventlet/coros.py +++ b/eventlet/coros.py @@ -42,6 +42,21 @@ NOT_USED = object() class event(object): """An abstraction where an arbitrary number of coroutines can wait for one event from another. + + Events differ from channels in two ways: + 1) calling send() does not unschedule the current coroutine + 2) send() can only be called once; use reset() to prepare the event for + another send() + They are ideal for communicating return values between coroutines. + + >>> from eventlet import coros, api + >>> evt = coros.event() + >>> def baz(b): + ... evt.send(b + 1) + ... + >>> _ = api.spawn(baz, 3) + >>> evt.wait() + 4 """ _result = None def __init__(self): @@ -49,17 +64,50 @@ class event(object): def reset(self): """ Reset this event so it can be used to send again. - Can only be called after send has been called.""" - assert self._result is not NOT_USED + Can only be called after send has been called. + + >>> from eventlet import coros + >>> evt = coros.event() + >>> evt.send(1) + >>> evt.reset() + >>> evt.send(2) + >>> evt.wait() + 2 + + Calling reset multiple times in a row is an error. + + >>> evt.reset() + >>> evt.reset() + Traceback (most recent call last): + ... + AssertionError: Trying to re-reset() a fresh event. + + """ + assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.' self.epoch = time.time() self._result = NOT_USED self._waiters = {} def wait(self): - """wait until another coroutine calls send. + """Wait until another coroutine calls send. Returns the value the other coroutine passed to - send. Returns immediately if the event has already + send. + + >>> from eventlet import coros, api + >>> evt = coros.event() + >>> def wait_on(): + ... retval = evt.wait() + ... print "waited for", retval + >>> _ = api.spawn(wait_on) + >>> evt.send('result') + >>> api.sleep(0) + waited for result + + Returns immediately if the event has already occured. + + >>> evt.wait() + 'result' """ if self._result is NOT_USED: self._waiters[api.getcurrent()] = True @@ -76,6 +124,36 @@ class event(object): waiter: The greenlet (greenlet.getcurrent()) of the coroutine to cancel + + >>> from eventlet import coros, api + >>> evt = coros.event() + >>> def wait_on(): + ... try: + ... print "received " + evt.wait() + ... except coros.Cancelled, c: + ... print "Cancelled" + ... 
+ >>> waiter = api.spawn(wait_on) + + The cancel call works on coroutines that are in the wait() call. + + >>> api.sleep(0) # enter the wait() + >>> evt.cancel(waiter) + >>> api.sleep(0) # receive the exception + Cancelled + + The cancel is invisible to coroutines that call wait() after cancel() + is called. This is different from send()'s behavior, where the result + is passed to any waiter regardless of the ordering of the calls. + + >>> waiter = api.spawn(wait_on) + >>> api.sleep(0) + + Cancels have no effect on the ability to send() to the event. + + >>> evt.send('stuff') + >>> api.sleep(0) + received stuff """ if waiter in self._waiters: del self._waiters[waiter] @@ -85,8 +163,30 @@ class event(object): def send(self, result=None, exc=None): """Makes arrangements for the waiters to be woken with the result and then returns immediately to the parent. + + >>> from eventlet import coros, api + >>> evt = coros.event() + >>> def waiter(): + ... print 'about to wait' + ... result = evt.wait() + ... print 'waited for', result + >>> _ = api.spawn(waiter) + >>> api.sleep(0) + about to wait + >>> evt.send('a') + >>> api.sleep(0) + waited for a + + It is an error to call send() multiple times on the same event. + + >>> evt.send('whoops') + Traceback (most recent call last): + ... + AssertionError: Trying to re-send() an already-triggered event. + + Use reset() between send()s to reuse an event object. """ - assert self._result is NOT_USED + assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.' self._result = result self._exc = exc hub = api.get_hub() @@ -95,6 +195,18 @@ class event(object): def execute(func, *args, **kw): + """ Executes an operation asynchronously in a new coroutine, returning + an event to retrieve the return value. + + This has the same api as the CoroutinePool.execute method; the only + difference is that this one creates a new coroutine instead of drawing + from a pool. + + >>> from eventlet import coros + >>> evt = coros.execute(lambda a: ('foo', a), 1) + >>> evt.wait() + ('foo', 1) + """ evt = event() def _really_execute(): evt.send(func(*args, **kw)) @@ -103,7 +215,35 @@ def execute(func, *args, **kw): class CoroutinePool(pools.Pool): - """ Like a thread pool, but with coroutines. """ + """ Like a thread pool, but with coroutines. + + Coroutine pools are useful for splitting up tasks or globally controlling + concurrency. You don't retrieve the coroutines directly with get() -- + instead use the execute() and execute_async() methods to run code. + + >>> from eventlet import coros, api + >>> p = coros.CoroutinePool(max_size=2) + >>> def foo(a): + ... print "foo", a + ... + >>> evt = p.execute(foo, 1) + >>> evt.wait() + foo 1 + + Once the pool is exhausted, calling an execute forces a yield. + + >>> p.execute_async(foo, 2) + >>> p.execute_async(foo, 3) + >>> p.free() + 0 + >>> p.execute_async(foo, 4) + foo 2 + foo 3 + + >>> api.sleep(0) + foo 4 + """ + def _main_loop(self, sender): while True: recvd = sender.wait() @@ -139,6 +279,12 @@ class CoroutinePool(pools.Pool): Immediately returns an eventlet.coros.event object which func's result will be sent to when it is available. + + >>> from eventlet import coros + >>> p = coros.CoroutinePool() + >>> evt = p.execute(lambda a: ('foo', a), 1) + >>> evt.wait() + ('foo', 1) """ sender = self.get() receiver = event() @@ -149,7 +295,15 @@ class CoroutinePool(pools.Pool): """Execute func in one of the coroutines maintained by the pool, when one is free. - This version does not provide the return value. 
+ No return value is provided. + >>> from eventlet import coros, api + >>> p = coros.CoroutinePool() + >>> def foo(a): + ... print "foo", a + ... + >>> p.execute_async(foo, 1) + >>> api.sleep(0) + foo 1 """ sender = self.get() sender.send((None, func, args, kw)) diff --git a/eventlet/db_pool_test.py b/eventlet/db_pool_test.py index 499b723..ca057fe 100644 --- a/eventlet/db_pool_test.py +++ b/eventlet/db_pool_test.py @@ -24,11 +24,9 @@ import os.path -from eventlet import api, coros +from eventlet import api, coros, tests from eventlet import db_pool -import unittest - class DBTester(object): def setUp(self): self.create_db() @@ -293,12 +291,19 @@ class TestMysqlConnectionPool(object): db.close() del db -class TestMysqlTpool(TestMysqlConnectionPool, TestTpoolConnectionPool, unittest.TestCase): +class TestMysqlTpool(TestMysqlConnectionPool, TestTpoolConnectionPool, tests.TestCase): pass -class TestMysqlSaranwrap(TestMysqlConnectionPool, TestSaranwrapConnectionPool, unittest.TestCase): +class TestMysqlSaranwrap(TestMysqlConnectionPool, TestSaranwrapConnectionPool, tests.TestCase): pass if __name__ == '__main__': - unittest.main() + try: + import MySQLdb + except ImportError: + print "Unable to import MySQLdb, skipping db_pool_test." + else: + unittest.main() +else: + import MySQLdb diff --git a/eventlet/tests.py b/eventlet/tests.py index 378fa18..cf1827a 100644 --- a/eventlet/tests.py +++ b/eventlet/tests.py @@ -27,16 +27,17 @@ import atexit import errno import os import sys -import unittest +import unittest, doctest TestCase = unittest.TestCase name = getattr(sys.modules['__main__'], '__name__', None) - main = unittest.main +# listing of all non-*_test test methods +eventlet_test_files = ['coros'] def find_command(command): for dir in os.getenv('PATH', '/usr/bin:/usr/sbin').split(os.pathsep): @@ -44,3 +45,44 @@ def find_command(command): if os.access(p, os.X_OK): return p raise IOError(errno.ENOENT, 'Command not found: %r' % command) + +def run_all_tests(test_files = eventlet_test_files): + """ Runs all the unit tests in eventlet, returning immediately after the + first failed test. + + Returns true if the tests all succeeded. This method is really much longer + than it ought to be. + """ + eventlet_dir = os.path.realpath(os.path.dirname(__file__)) + if eventlet_dir not in sys.path: + sys.path.append(eventlet_dir) + + # add all _test files as a policy + import glob + test_files += [os.path.splitext(os.path.basename(x))[0] + for x in glob.glob(os.path.join(eventlet_dir, "*_test.py"))] + test_files.sort() + + for test_file in test_files: + print "-=", test_file, "=-" + try: + test_module = __import__(test_file) + except ImportError: + print "Unable to import %s, skipping" % test_file + continue + + if test_file.endswith('_test'): + # gawd, unittest, why you make it so difficult to just run some tests! + suite = unittest.findTestCases(test_module) + result = unittest.TextTestRunner().run(suite) + if not result.wasSuccessful(): + return False + else: + failures, tests = doctest.testmod(test_module) + if failures: + return False + + return True + +if __name__ == '__main__': + run_all_tests() \ No newline at end of file diff --git a/eventlet/tpool_test.py b/eventlet/tpool_test.py index 0af2e23..e56ade0 100644 --- a/eventlet/tpool_test.py +++ b/eventlet/tpool_test.py @@ -18,7 +18,7 @@ limitations under the License. 
""" import os, socket, time, threading -from eventlet import coros, api, tpool +from eventlet import coros, api, tpool, tests from eventlet.tpool import erpc from sys import stdout @@ -49,12 +49,16 @@ def sender_loop(pfx): api.sleep(0) n += 1 -def test1(): - pool = coros.CoroutinePool(max_size=10) - waiters = [] - for i in range(0,9): - waiters.append(pool.execute(sender_loop,i)) - for waiter in waiters: - waiter.wait() -test1() +class TestTpool(tests.TestCase): + def test1(self): + pool = coros.CoroutinePool(max_size=10) + waiters = [] + for i in range(0,9): + waiters.append(pool.execute(sender_loop,i)) + for waiter in waiters: + waiter.wait() + + +if __name__ == '__main__': + tests.main() From f18cc0367e9d6d8ae960e9cd48645a5ca7b19cb4 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 4 Feb 2008 20:34:59 -0500 Subject: [PATCH 62/79] [svn r85] Minor bug in refactoring prevented db_pool_test from being run standalone on systems with MySQLdb installed. Also commented out the ab test cause finding the ab binary was not working most of the time, and when it was, it was spewing tons of output all over the place. --- eventlet/db_pool_test.py | 2 +- eventlet/httpd_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/eventlet/db_pool_test.py b/eventlet/db_pool_test.py index ca057fe..df1a043 100644 --- a/eventlet/db_pool_test.py +++ b/eventlet/db_pool_test.py @@ -304,6 +304,6 @@ if __name__ == '__main__': except ImportError: print "Unable to import MySQLdb, skipping db_pool_test." else: - unittest.main() + tests.main() else: import MySQLdb diff --git a/eventlet/httpd_test.py b/eventlet/httpd_test.py index 85600fe..53b5aa9 100644 --- a/eventlet/httpd_test.py +++ b/eventlet/httpd_test.py @@ -129,7 +129,7 @@ class TestHttpd(tests.TestCase): self.assertRaises(ConnectionClosed, read_http, sock) sock.close() - def test_005_run_apachebench(self): + def skip_test_005_run_apachebench(self): url = 'http://localhost:12346/' # ab is apachebench out = processes.Process(tests.find_command('ab'), From da2063fef6061a84cd982ebee571bb2695a8919f Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 4 Feb 2008 20:39:10 -0500 Subject: [PATCH 63/79] [svn r86] Quieted noisy tests. 
--- eventlet/httpd_test.py | 8 +++++++- eventlet/tpool_test.py | 12 +++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/eventlet/httpd_test.py b/eventlet/httpd_test.py index 53b5aa9..dcf889e 100644 --- a/eventlet/httpd_test.py +++ b/eventlet/httpd_test.py @@ -28,6 +28,11 @@ from eventlet import httpd from eventlet import processes from eventlet import util +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + util.wrap_socket_with_coroutine_socket() @@ -79,8 +84,9 @@ def read_http(sock): class TestHttpd(tests.TestCase): mode = 'static' def setUp(self): + self.logfile = StringIO() self.killer = api.spawn( - httpd.server, api.tcp_listener(('0.0.0.0', 12346)), Site(), max_size=128) + httpd.server, api.tcp_listener(('0.0.0.0', 12346)), Site(), max_size=128, log=self.logfile) def tearDown(self): api.kill(self.killer) diff --git a/eventlet/tpool_test.py b/eventlet/tpool_test.py index e56ade0..19442bc 100644 --- a/eventlet/tpool_test.py +++ b/eventlet/tpool_test.py @@ -26,13 +26,19 @@ from sys import stdout import random r = random.WichmannHill() +_g_debug = False + +def prnt(msg): + if _g_debug: + print msg + class yadda(object): def __init__(self): pass def foo(self,when,n=None): assert(n is not None) - print "foo: %s, %s" % (when,n) + prnt("foo: %s, %s" % (when,n)) time.sleep(r.random()) return n @@ -42,9 +48,9 @@ def sender_loop(pfx): while n < 10: api.sleep(0) now = time.time() - print "%s: send (%s,%s)" % (pfx,now,n) + prnt("%s: send (%s,%s)" % (pfx,now,n)) rv = obj.foo(now,n=n) - print "%s: recv %s" % (pfx, rv) + prnt("%s: recv %s" % (pfx, rv)) assert(n == rv) api.sleep(0) n += 1 From 96007ae8b377cfcdf8b9d1bd2d9d7e413d9ef5b7 Mon Sep 17 00:00:00 2001 From: "nat.linden" Date: Wed, 13 Feb 2008 15:04:45 -0500 Subject: [PATCH 64/79] [svn r87] Fix bug in wrapped_fd.readuntil() (used by readline() et al.) in which a multi-character terminator (e.g. '\r\n') could be split across read() buffers and unrecognized. Instead of skipping all of 'buf', advance 'checked' to len(buf) minus (len(terminator) - 1). --- eventlet/wrappedfd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eventlet/wrappedfd.py b/eventlet/wrappedfd.py index a5ab0aa..21265fb 100644 --- a/eventlet/wrappedfd.py +++ b/eventlet/wrappedfd.py @@ -194,7 +194,7 @@ class wrapped_fd(object): found += len(terminator) chunk, self.recvbuffer = buf[:found], buf[found:] return chunk - checked = len(buf) + checked = max(0, len(buf) - (len(terminator) - 1)) d = self.recv(BUFFER_SIZE) if not d: break From fea78b65b7a546585b799568c5035a90ab239206 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Wed, 13 Feb 2008 22:13:16 -0500 Subject: [PATCH 65/79] [svn r88] Refactored the body-parsing logic that used to be in read_body() into parsed_body() and added some docs. This means that if you were calling read_body expecting to get anything but a string out, you should change to parsed_body() instead. Refactored tests.py a little bit. 
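A usage sketch of the split described above (the JsonSite class is hypothetical, and simplejson stands in for any parser callable; only read_body(), parsed_body(), and the site-level parsers mapping come from the patch): read_body() now always hands back the raw request string, while parsed_body() consults site.parsers keyed on the content-type header.

import simplejson

class JsonSite(object):
    # parsed_body() looks up a parser by the request's content-type header
    parsers = {'application/json': simplejson.loads}

    def handle_request(self, req):
        raw = req.read_body()      # always a string ('' if there was no body)
        data = req.parsed_body()   # parsed value if a parser matched, else the raw string
        req.write('got %d bytes, parsed as %s' % (len(raw), type(data).__name__))
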
--- eventlet/db_pool.py | 3 ++- eventlet/httpd.py | 37 +++++++++++++++++++++++++++---------- eventlet/tests.py | 13 +++++++------ 3 files changed, 36 insertions(+), 17 deletions(-) diff --git a/eventlet/db_pool.py b/eventlet/db_pool.py index d44fd46..189c446 100644 --- a/eventlet/db_pool.py +++ b/eventlet/db_pool.py @@ -63,7 +63,8 @@ connection pools keyed on host,databasename""" new_kwargs['db'] = dbname new_kwargs['host'] = host new_kwargs.update(self.credentials_for(host)) - dbpool = self._conn_pool_class(self._module, self._min_size, self._max_size, *self._args, **new_kwargs) + dbpool = self._conn_pool_class(self._module, min_size=self._min_size, max_size=self._max_size, + *self._args, **new_kwargs) self._databases[key] = dbpool return self._databases[key] diff --git a/eventlet/httpd.py b/eventlet/httpd.py index a5e8760..513e62b 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -337,22 +337,39 @@ class Request(object): return self._cached_body def read_body(self): + """ Returns the string body that was read off the request, or + the empty string if there was no request body. + + Requires a content-length header. Caches the body so multiple + calls to read_body() are free. + """ + if not hasattr(self, '_cached_body'): + length = self.get_header('content-length') + if length: + length = int(length) + if length: + self._cached_body = self.protocol.rfile.read(length) + else: + self._cached_body = '' + return self._cached_body + + def parsed_body(self): + """ Returns the parsed version of the body, using the + content-type header to select from the parsers on the site + object. + + If no parser is found, returns the string body from + read_body(). Caches the parsed body so multiple calls to + parsed_body() are free. + """ if not hasattr(self, '_cached_parsed_body'): - if not hasattr(self, '_cached_body'): - length = self.get_header('content-length') - if length: - length = int(length) - if length: - self._cached_body = self.protocol.rfile.read(length) - else: - self._cached_body = '' - body = self._cached_body + body = self.read_body() if hasattr(self.site, 'parsers'): parser = self.site.parsers.get( self.get_header('content-type')) if parser is not None: body = parser(body) - self._cached_parsed_body = body + self._cached_parsed_body = body return self._cached_parsed_body def override_body(self, body): diff --git a/eventlet/tests.py b/eventlet/tests.py index cf1827a..41968c2 100644 --- a/eventlet/tests.py +++ b/eventlet/tests.py @@ -23,7 +23,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ -import atexit import errno import os import sys @@ -36,8 +35,8 @@ TestCase = unittest.TestCase name = getattr(sys.modules['__main__'], '__name__', None) main = unittest.main -# listing of all non-*_test test methods -eventlet_test_files = ['coros'] +# listing of files containing doctests +doc_test_files = ['coros'] def find_command(command): for dir in os.getenv('PATH', '/usr/bin:/usr/sbin').split(os.pathsep): @@ -46,8 +45,8 @@ def find_command(command): return p raise IOError(errno.ENOENT, 'Command not found: %r' % command) -def run_all_tests(test_files = eventlet_test_files): - """ Runs all the unit tests in eventlet, returning immediately after the +def run_all_tests(test_files = doc_test_files): + """ Runs all the unit tests, returning immediately after the first failed test. Returns true if the tests all succeeded. 
This method is really much longer @@ -81,8 +80,10 @@ def run_all_tests(test_files = eventlet_test_files): failures, tests = doctest.testmod(test_module) if failures: return False + else: + print "OK" return True if __name__ == '__main__': - run_all_tests() \ No newline at end of file + run_all_tests() From 4526fb480c5640ef9b031fcf4e4ee4b90f52453f Mon Sep 17 00:00:00 2001 From: "sabin.linden" Date: Thu, 14 Feb 2008 20:27:33 -0500 Subject: [PATCH 66/79] [svn r89] Changes made to allow dynamic resizing of the coroutine pool. Reviewed by Donovan. --- eventlet/pools.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/eventlet/pools.py b/eventlet/pools.py index 4f8ccb1..abb16fc 100644 --- a/eventlet/pools.py +++ b/eventlet/pools.py @@ -75,11 +75,20 @@ class Pool(object): def put(self, item): """Put an item back into the pool, when done """ + if self.current_size > self.max_size: + self.current_size -= 1 + return + if self.channel.balance < 0: self.channel.send(item) else: self.free_items.append(item) + def resize(self, new_size): + """Resize the pool + """ + self.max_size = new_size + def free(self): """Return the number of free items in the pool. """ From 6783d603654a7361018a1d3cd19dc3faaf03f5d7 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 18 Feb 2008 19:13:25 -0500 Subject: [PATCH 67/79] [svn r90] http://jira.secondlife.com/browse/EVT-14: Strange 400 error when using events and POST over HTTP/1.1. Turned out the problem was in the implementation of get_arg. --- eventlet/httpd.py | 11 +++++++---- eventlet/httpd_test.py | 35 +++++++++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index 513e62b..27aedad 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -24,7 +24,6 @@ THE SOFTWARE. """ import cgi -import cStringIO import errno import socket import sys @@ -32,9 +31,13 @@ import time import urllib import socket import traceback -import cStringIO import BaseHTTPServer +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + from eventlet import api from eventlet import coros @@ -280,9 +283,9 @@ class Request(object): data = '' if self._query: data = self._query - fl = cStringIO.StringIO(data) else: - fl = self.protocol.rfile + data = self.read_body() + fl = StringIO(data) ## Allow our resource to provide the FieldStorage instance for ## customization purposes. 
headers = self.get_headers() diff --git a/eventlet/httpd_test.py b/eventlet/httpd_test.py index dcf889e..fe52e70 100644 --- a/eventlet/httpd_test.py +++ b/eventlet/httpd_test.py @@ -79,14 +79,19 @@ def read_http(sock): num = int(headers[CONTENT_LENGTH]) body = sock.read(num) #print body + else: + body = None + + return response_line, headers, body class TestHttpd(tests.TestCase): mode = 'static' def setUp(self): self.logfile = StringIO() + self.site = Site() self.killer = api.spawn( - httpd.server, api.tcp_listener(('0.0.0.0', 12346)), Site(), max_size=128, log=self.logfile) + httpd.server, api.tcp_listener(('0.0.0.0', 12346)), self.site, max_size=128, log=self.logfile) def tearDown(self): api.kill(self.killer) @@ -153,9 +158,35 @@ class TestHttpd(tests.TestCase): sock.write(request) result = sock.readline() status = result.split(' ')[1] - print "status:",status self.assertEqual(status, '414') sock.close() + + def test_007_get_arg(self): + # define a new handler that does a get_arg as well as a read_body + def new_handle_request(req): + a = req.get_arg('a') + body = req.read_body() + req.write('a is %s, body is %s' % (a, body)) + self.site.handle_request = new_handle_request + + sock = api.connect_tcp( + ('127.0.0.1', 12346)) + request = '\r\n'.join(( + 'POST /%s HTTP/1.0', + 'Host: localhost', + 'Content-Length: 3', + '', + 'a=a')) + sock.write(request) + + # send some junk after the actual request + sock.write('01234567890123456789') + reqline, headers, body = read_http(sock) + self.assertEqual(body, 'a is a, body is a=a') + sock.close() + + + if __name__ == '__main__': tests.main() From 91961b0250be2506d1093b60ed417156fd3f312c Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Tue, 19 Feb 2008 01:10:56 -0500 Subject: [PATCH 68/79] [svn r91] http://jira.secondlife.com/browse/EVT-13: Timer cleanup behavior is inconsistent between HTTP/1.1 and HTTP/1.0. This involved capturing a traceback on timer creation (based on a debug variable -- it'd be expensive to do that all the time), and adding timer cleanup code at the end of the handle method. 
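The traceback-capture idea mentioned above, sketched in isolation (the class and flag names here are illustrative; the real change is the _g_debug switch added to eventlet/timer.py below): record where an object was constructed, but only when a debug flag is on, since capturing a stack trace on every creation would be expensive.

import traceback, cStringIO

_debug_timers = False   # off by default; capturing stacks everywhere is costly

class TraceableTimer(object):
    def __init__(self):
        self.traceback = None
        if _debug_timers:
            buf = cStringIO.StringIO()
            traceback.print_stack(file=buf)   # remember where this timer was set up
            self.traceback = buf.getvalue()

    def __repr__(self):
        rep = '<TraceableTimer>'
        if self.traceback:
            rep += '\n' + self.traceback
        return rep
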
--- eventlet/httpd.py | 19 ++++++++++++------- eventlet/runloop.py | 3 ++- eventlet/timer.py | 15 +++++++++++++-- 3 files changed, 27 insertions(+), 10 deletions(-) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index 27aedad..b00d411 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -492,12 +492,17 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): try: try: - self.server.site.handle_request(request) - except ErrorResponse, err: - request.response(code=err.code, - reason_phrase=err.reason, - headers=err.headers, - body=err.body) + try: + self.server.site.handle_request(request) + except ErrorResponse, err: + request.response(code=err.code, + reason_phrase=err.reason, + headers=err.headers, + body=err.body) + finally: + # clean up any timers that might have been left around by the handling code + api.get_hub().runloop.cancel_timers(api.getcurrent()) + # throw an exception if it failed to write a body if not request.response_written(): raise NotImplementedError("Handler failed to write response to request: %s" % request) @@ -507,7 +512,7 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): request.read_body() ## read & discard body except: pass - continue + except socket.error, e: # Broken pipe, connection reset by peer if e[0] in CONNECTION_CLOSED: diff --git a/eventlet/runloop.py b/eventlet/runloop.py index fb93549..3fde1a4 100644 --- a/eventlet/runloop.py +++ b/eventlet/runloop.py @@ -218,10 +218,11 @@ class RunLoop(object): if greenlet not in self.timers_by_greenlet: return for timer in self.timers_by_greenlet[greenlet]: - if timer.seconds: + if not timer.cancelled and timer.seconds: ## If timer.seconds is 0, this isn't a timer, it's ## actually eventlet's silly way of specifying whether ## a coroutine is "ready to run" or not. timer.cancel() + print 'Runloop cancelling left-over timer %s' % timer del self.timers_by_greenlet[greenlet] diff --git a/eventlet/timer.py b/eventlet/timer.py index f261b7d..94237ac 100644 --- a/eventlet/timer.py +++ b/eventlet/timer.py @@ -24,8 +24,12 @@ THE SOFTWARE. """ from eventlet.api import get_hub +""" If true, captures a stack trace for each timer when constructed. This is +useful for debugging leaking timers, to find out where the timer was set up. """ +_g_debug = False + class Timer(object): - __slots__ = ['seconds', 'tpl', 'called', 'cancelled', 'scheduled_time', 'greenlet'] + __slots__ = ['seconds', 'tpl', 'called', 'cancelled', 'scheduled_time', 'greenlet', 'traceback'] def __init__(self, seconds, cb, *args, **kw): """Create a timer. seconds: The minimum number of seconds to wait before calling @@ -40,12 +44,19 @@ class Timer(object): self.seconds = seconds self.tpl = cb, args, kw self.called = False + if _g_debug: + import traceback, cStringIO + self.traceback = cStringIO.StringIO() + traceback.print_stack(file=self.traceback) def __repr__(self): secs = getattr(self, 'seconds', None) cb, args, kw = getattr(self, 'tpl', (None, None, None)) - return "Timer(%s, %s, *%s, **%s)" % ( + retval = "Timer(%s, %s, *%s, **%s)" % ( secs, cb, args, kw) + if _g_debug and hasattr(self, 'traceback': + retval += '\n' + self.traceback.getvalue() + return retval def copy(self): cb, args, kw = self.tpl From 1c25f1c91a0da97583a38f6855c4f579dbfdcb12 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Tue, 19 Feb 2008 01:15:19 -0500 Subject: [PATCH 69/79] [svn r92] Argh, though I could get away with a last-minute pre-commit change but typo'd it. Never again\! 
--- eventlet/timer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eventlet/timer.py b/eventlet/timer.py index 94237ac..a91390a 100644 --- a/eventlet/timer.py +++ b/eventlet/timer.py @@ -54,7 +54,7 @@ class Timer(object): cb, args, kw = getattr(self, 'tpl', (None, None, None)) retval = "Timer(%s, %s, *%s, **%s)" % ( secs, cb, args, kw) - if _g_debug and hasattr(self, 'traceback': + if _g_debug and hasattr(self, 'traceback'): retval += '\n' + self.traceback.getvalue() return retval From 48321f90d145bc0b69627cc46d012a285149c249 Mon Sep 17 00:00:00 2001 From: "seeping.blister" Date: Tue, 19 Feb 2008 18:24:32 -0500 Subject: [PATCH 70/79] [svn r93] "nonblocking" support in tpool, to be exploited by dbmgr, so that nonblocking DB calls don't need to context-switch to a native thread. --- eventlet/db_pool.py | 2 +- eventlet/httpd.py | 11 ++++++++++- eventlet/tpool.py | 24 +++++++++++++++++------- 3 files changed, 28 insertions(+), 9 deletions(-) diff --git a/eventlet/db_pool.py b/eventlet/db_pool.py index 189c446..3c91793 100644 --- a/eventlet/db_pool.py +++ b/eventlet/db_pool.py @@ -157,7 +157,7 @@ class GenericConnectionWrapper(object): def character_set_name(self,*args, **kwargs): return self._base.character_set_name(*args, **kwargs) def close(self,*args, **kwargs): return self._base.close(*args, **kwargs) def commit(self,*args, **kwargs): return self._base.commit(*args, **kwargs) - def cursor(self, cursorclass=None): return self._base.cursor(cursorclass) + def cursor(self, cursorclass=None, **kwargs): return self._base.cursor(cursorclass, **kwargs) def dump_debug_info(self,*args, **kwargs): return self._base.dump_debug_info(*args, **kwargs) def errno(self,*args, **kwargs): return self._base.errno(*args, **kwargs) def error(self,*args, **kwargs): return self._base.error(*args, **kwargs) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index b00d411..fb344df 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -495,6 +495,10 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): try: self.server.site.handle_request(request) except ErrorResponse, err: + import sys, traceback + (a,b,tb) = sys.exc_info() + traceback.print_exception(ErrorResponse,err,tb) + request.response(code=err.code, reason_phrase=err.reason, headers=err.headers, @@ -528,6 +532,11 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): raise self.socket.close() +def tid(): + n = long(id(api.getcurrent())) + if n < 0: + n = -n + return hex(n) class Server(BaseHTTPServer.HTTPServer): def __init__(self, socket, address, site, log, max_http_version=DEFAULT_MAX_HTTP_VERSION): @@ -543,7 +552,7 @@ class Server(BaseHTTPServer.HTTPServer): self.log = self def write(self, something): - sys.stdout.write('%s' % (something, )) + sys.stdout.write('%s: %s' % (tid(), something, )); sys.stdout.flush() def log_message(self, message): self.log.write(message) diff --git a/eventlet/tpool.py b/eventlet/tpool.py index 0f0719b..6ca505d 100644 --- a/eventlet/tpool.py +++ b/eventlet/tpool.py @@ -24,6 +24,7 @@ from sys import stdout from Queue import Empty, Queue from eventlet import api, coros, httpc, httpd, util, wrappedfd +from eventlet.api import trampoline, get_hub _rpipe, _wpipe = os.pipe() _rfile = os.fdopen(_rpipe,"r",0) @@ -36,10 +37,10 @@ def _signal_t2e(): _reqq = Queue(maxsize=-1) _rspq = Queue(maxsize=-1) -def trampoline(): +def tpool_trampoline(): global _reqq, _rspq while(True): - _c = _wrap_rfile.read(1) + _c = _wrap_rfile.recv(1) assert(_c != "") while not _rspq.empty(): try: @@ -62,14 +63,20 @@ 
def tworker(): try: rv = meth(*args,**kwargs) except Exception,exn: - rv = exn + import sys, traceback + (a,b,tb) = sys.exc_info() + rv = (exn,a,b,tb) _rspq.put((e,rv)) _signal_t2e() def erecv(e): rv = e.wait() - if isinstance(rv,Exception): - raise rv + if isinstance(rv,tuple) and len(rv) == 4 and isinstance(rv[0],Exception): + import sys, traceback + (e,a,b,tb) = rv + traceback.print_exception(Exception,e,tb) + traceback.print_stack() + raise e return rv def erpc(meth,*args, **kwargs): @@ -92,7 +99,10 @@ class Proxy(object): if not callable(f): return f def doit(*args, **kwargs): - rv = erpc(f,*args,**kwargs) + if kwargs.pop('nonblocking',False): + rv = f(*args, **kwargs) + else: + rv = erpc(f,*args,**kwargs) if type(rv) in self._autowrap: return Proxy(rv) else: @@ -108,6 +118,6 @@ def setup(): _threads[i].setDaemon(True) _threads[i].start() - api.spawn(trampoline) + api.spawn(tpool_trampoline) setup() From c25cd2f3d836382dc38e18f5c95411ec002591e4 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Tue, 19 Feb 2008 19:31:34 -0500 Subject: [PATCH 71/79] [svn r94] Reverted changes from r93 to httpd.py. While the tid is a useful concept, we should come up with a better design/implementation at some point in the future. --- eventlet/httpd.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index fb344df..ed1e7bd 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -495,10 +495,6 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): try: self.server.site.handle_request(request) except ErrorResponse, err: - import sys, traceback - (a,b,tb) = sys.exc_info() - traceback.print_exception(ErrorResponse,err,tb) - request.response(code=err.code, reason_phrase=err.reason, headers=err.headers, @@ -532,11 +528,6 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): raise self.socket.close() -def tid(): - n = long(id(api.getcurrent())) - if n < 0: - n = -n - return hex(n) class Server(BaseHTTPServer.HTTPServer): def __init__(self, socket, address, site, log, max_http_version=DEFAULT_MAX_HTTP_VERSION): @@ -552,7 +543,7 @@ class Server(BaseHTTPServer.HTTPServer): self.log = self def write(self, something): - sys.stdout.write('%s: %s' % (tid(), something, )); sys.stdout.flush() + sys.stdout.write('%s' % (something, )); sys.stdout.flush() def log_message(self, message): self.log.write(message) From bed461e78d03ff42ff13ab57c71eeabf14557cd2 Mon Sep 17 00:00:00 2001 From: "donovan.linden" Date: Fri, 29 Feb 2008 17:22:47 -0500 Subject: [PATCH 72/79] [svn r95] Remote tracebacks in https are now printed nicely on the client side --- eventlet/httpc.py | 11 ++++++++--- eventlet/httpd.py | 1 + 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index da3975a..63c6caf 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -357,10 +357,15 @@ class InternalServerError(ConnectionError): def __repr__(self): try: import simplejson - body = simplejson.loads(self.params.response_body) + traceback = simplejson.loads(self.params.response_body) except: - traceback = self.params.response_body - else: + try: + from indra.base import llsd + traceback = llsd.parse(self.params.response_body) + except: + traceback = self.params.response_body + if isinstance(traceback, dict): + body = traceback traceback = "Traceback (most recent call last):\n" for frame in body['stack-trace']: traceback += ' File "%s", line %s, in %s\n' % ( diff --git a/eventlet/httpd.py b/eventlet/httpd.py index ed1e7bd..8940523 
100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -77,6 +77,7 @@ class Request(object): _producer_adapters = {} depth = 0 def __init__(self, protocol, method, path, headers): + self.context = {} self.request_start_time = time.time() self.site = protocol.server.site self.protocol = protocol From 97f256ccb7274dd6b899865f3fb1068d1f4a4f41 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Sun, 2 Mar 2008 18:59:20 -0500 Subject: [PATCH 73/79] [svn r96] A batch of exception-related cleanup and fixes. - insulated Actor against exceptions raised in the received() method and another very-infrequently-occuring bug in run_forever. - More verbose when GreenletExit is raised in CoroutinePool, since we're not even sure it happens. - Minor optimization in httpd -- exceptions are somewhat expensive to raise -- when keep-alive header isn't present. - More verbose exception logging in httpd -- previously these exceptions would have gone unnoticed. - Trimmed the logic in greenlib.switch to just call exc_clear() since there's no reason to even bother with the if statement. --- eventlet/coros.py | 59 +++++++++++++++++++++++++++++++++++++++--- eventlet/coros_test.py | 27 ++++++++++++++++++- eventlet/greenlib.py | 5 +--- eventlet/httpd.py | 8 +++--- 4 files changed, 87 insertions(+), 12 deletions(-) diff --git a/eventlet/coros.py b/eventlet/coros.py index 6c0318c..8b89281 100644 --- a/eventlet/coros.py +++ b/eventlet/coros.py @@ -23,6 +23,7 @@ THE SOFTWARE. """ import collections +import sys import time import traceback @@ -253,8 +254,12 @@ class CoroutinePool(pools.Pool): result = func(*args, **kw) if evt is not None: evt.send(result) - except api.GreenletExit: - pass + except api.GreenletExit, e: + # we're printing this out to see if it ever happens + # in practice + print "GreenletExit raised in coroutine pool", e + if evt is not None: + evt.send(e) # sent as a return value, not an exception except Exception, e: traceback.print_exc() if evt is not None: @@ -352,12 +357,22 @@ class Actor(object): while True: if not self._mailbox: self._event.wait() - self._event.reset() + self._event = event() else: # leave the message in the mailbox until after it's # been processed so the event doesn't get triggered # while in the received method - self.received(self._mailbox[0]) + try: + self.received(self._mailbox[0]) + except KeyboardInterrupt: + raise # allow the program to quit + except: + # we don't want to let the exception escape this + # loop because that would kill the coroutine + e = sys.exc_info()[0] + self.excepted(e) + sys.exc_clear() + self._mailbox.popleft() def cast(self, message): @@ -394,9 +409,45 @@ class Actor(object): >>> api.sleep(0) received message 2 received message 3 + + >>> api.kill(a._killer) # test cleanup """ raise NotImplementedError() + def excepted(self, exc): + """ Called when the received method raises an exception. + + The default implementation simply prints out the raised exception. + Redefine it for customization. + + >>> class Exceptor(Actor): + ... def received(self, message): + ... if message == 'fail': + ... message + 1 + ... else: + ... print "received", message + ... def excepted(self, exc): + ... print "excepted:", exc + >>> a = Exceptor() + >>> a.cast('fail') + >>> api.sleep(0) + excepted: + + The main purpose of excepted is to prevent the actor's coroutine + from dying. + + >>> a.cast('message 2') + >>> api.sleep(0) + received message 2 + + If excepted() itself raises an exception, that will kill the coroutine. 
+ + >>> api.kill(a._killer) # test cleanup + """ + print "Exception in %s.received(): %s" % ( + type(self).__name__, exc) + traceback.print_exc() + def _test(): print "Running doctests. There will be no further output if they succeed." import doctest diff --git a/eventlet/coros_test.py b/eventlet/coros_test.py index 0675c3e..7a28d3e 100644 --- a/eventlet/coros_test.py +++ b/eventlet/coros_test.py @@ -98,6 +98,16 @@ class TestEvent(tests.TestCase): api.spawn(send_to_event2) self.assertEqual(evt.wait(), value2) + def test_double_exception(self): + evt = coros.event() + # send an exception through the event + evt.send(exc=RuntimeError()) + self.assertRaises(RuntimeError, evt.wait) + evt.reset() + # shouldn't see the RuntimeError again + api.exc_after(0.001, api.TimeoutError) + self.assertRaises(api.TimeoutError, evt.wait) + class TestCoroutinePool(tests.TestCase): mode = 'static' def setUp(self): @@ -160,7 +170,7 @@ class TestActor(tests.TestCase): mode = 'static' def setUp(self): # raise an exception if we're waiting forever - self._cancel_timeout = api.exc_after(1, RuntimeError()) + self._cancel_timeout = api.exc_after(1, api.TimeoutError()) self.actor = IncrActor() def tearDown(self): @@ -213,7 +223,22 @@ class TestActor(tests.TestCase): self.assertEqual(msgs, [1,2,3,4,5]) + def test_raising_received(self): + msgs = [] + def received(message): + if message == 'fail': + raise RuntimeError() + else: + print "appending" + msgs.append(message) + + self.actor.received = received + self.actor.cast('fail') + api.sleep(0) + self.actor.cast('should_appear') + api.sleep(0) + self.assertEqual(['should_appear'], msgs) if __name__ == '__main__': tests.main() diff --git a/eventlet/greenlib.py b/eventlet/greenlib.py index 58dac7a..1dbf01e 100644 --- a/eventlet/greenlib.py +++ b/eventlet/greenlib.py @@ -306,10 +306,7 @@ def switch(other=None, value=None, exc=None): if not (other or hasattr(other, 'run')): raise SwitchingToDeadGreenlet("Switching to dead greenlet %r %r %r" % (other, value, exc)) _greenlet_context_call('swap_out') - running_exc = sys.exc_info() - if running_exc[0] != None: # see if we're in the middle of an exception handler - sys.exc_clear() # don't pass along exceptions to the other coroutine - del running_exc # tracebacks can create cyclic object references + sys.exc_clear() # don't pass along exceptions to the other coroutine try: rval = other.switch(value, exc) if not rval or not other: diff --git a/eventlet/httpd.py b/eventlet/httpd.py index 8940523..d0609b3 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -487,8 +487,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): request.set_header('Server', self.version_string()) request.set_header('Date', self.date_time_string()) try: - timeout = int(request.get_header('keep-alive')) - except (TypeError, ValueError), e: + timeout = int(request.get_header('keep-alive', timeout)) + except TypeError, ValueError: pass try: @@ -522,11 +522,13 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): else: raise except Exception, e: + self.server.log_message("Exception caught in HttpRequest.handle():\n") + self.server.log_exception(*sys.exc_info()) if not request.response_written(): request.response(500) request.write('Internal Server Error') self.socket.close() - raise + raise e # can't do a plain raise since exc_info might have been cleared self.socket.close() From 274b8ee63a058efed6956eaf7d3c1d65c571d3d4 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Sun, 2 Mar 2008 21:08:25 -0500 Subject: [PATCH 74/79] [svn r97] Fix 
for reason phrase persisting across http/1.1 requests. --- eventlet/httpd.py | 5 ++--- eventlet/httpd_test.py | 17 ++++++++++++++++- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/eventlet/httpd.py b/eventlet/httpd.py index d0609b3..7d73756 100644 --- a/eventlet/httpd.py +++ b/eventlet/httpd.py @@ -415,8 +415,7 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): self.socket = self.request = self.rfile = self.wfile = request self.client_address = client_address self.server = server - self._code = 200 - self._message = 'OK' + self.set_response_code(None, 200, None) self.protocol_version = server.max_http_version def set_response_code(self, request, code, message): @@ -482,7 +481,7 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): self.close_connection = True continue - self._code = 200 + self.set_response_code(None, 200, None) request = Request(self, self.command, self.path, self.headers) request.set_header('Server', self.version_string()) request.set_header('Date', self.date_time_string()) diff --git a/eventlet/httpd_test.py b/eventlet/httpd_test.py index fe52e70..e7e0fbc 100644 --- a/eventlet/httpd_test.py +++ b/eventlet/httpd_test.py @@ -42,6 +42,10 @@ from eventlet import tests class Site(object): def handle_request(self, req): + path = req.path_segments() + if len(path) > 0 and path[0] == "notexist": + req.response(404, body='not found') + return req.write('hello world') def adapt(self, obj, req): @@ -185,7 +189,18 @@ class TestHttpd(tests.TestCase): self.assertEqual(body, 'a is a, body is a=a') sock.close() - + def test_008_correctresponse(self): + sock = api.connect_tcp( + ('127.0.0.1', 12346)) + + sock.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') + response_line_200,_,_ = read_http(sock) + sock.write('GET /notexist HTTP/1.1\r\nHost: localhost\r\n\r\n') + response_line_404,_,_ = read_http(sock) + sock.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') + response_line_test,_,_ = read_http(sock) + self.assertEqual(response_line_200,response_line_test) + sock.close() if __name__ == '__main__': From 38fca2637ff580ea9ae34221bfef0eb211502200 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 3 Mar 2008 17:00:52 -0500 Subject: [PATCH 75/79] [svn r98] Coroutine pools can now safely self-execute in the trivial case. The unit test decsribes the problem most succinctly. It turns out to be rather difficult to avoid inadvertently reentering a coroutine pool in code of moderate complexity, so it's better to simply make it safe to do so. Reviewed by Donovan. --- eventlet/coros.py | 66 +++++++++++++++++++++++++++++------------- eventlet/coros_test.py | 22 ++++++++++++-- 2 files changed, 66 insertions(+), 22 deletions(-) diff --git a/eventlet/coros.py b/eventlet/coros.py index 8b89281..cd05163 100644 --- a/eventlet/coros.py +++ b/eventlet/coros.py @@ -33,6 +33,13 @@ from eventlet import channel from eventlet import pools from eventlet import greenlib + +try: + set +except NameError: # python 2.3 compatibility + from sets import Set as set + + class Cancelled(RuntimeError): pass @@ -245,27 +252,48 @@ class CoroutinePool(pools.Pool): foo 4 """ + def __init__(self, min_size=0, max_size=4): + self._greenlets = set() + super(CoroutinePool, self).__init__(min_size, max_size) + def _main_loop(self, sender): + """ Private, infinite loop run by a pooled coroutine. 
""" while True: recvd = sender.wait() sender.reset() (evt, func, args, kw) = recvd - try: - result = func(*args, **kw) - if evt is not None: - evt.send(result) - except api.GreenletExit, e: - # we're printing this out to see if it ever happens - # in practice - print "GreenletExit raised in coroutine pool", e - if evt is not None: - evt.send(e) # sent as a return value, not an exception - except Exception, e: - traceback.print_exc() - if evt is not None: - evt.send(exc=e) + self._safe_apply(evt, func, args, kw) api.get_hub().runloop.cancel_timers(api.getcurrent()) self.put(sender) + + def _safe_apply(self, evt, func, args, kw): + """ Private method that runs the function, catches exceptions, and + passes back the return value in the event.""" + try: + result = func(*args, **kw) + if evt is not None: + evt.send(result) + except api.GreenletExit, e: + # we're printing this out to see if it ever happens + # in practice + print "GreenletExit raised in coroutine pool", e + if evt is not None: + evt.send(e) # sent as a return value, not an exception + except Exception, e: + traceback.print_exc() + if evt is not None: + evt.send(exc=e) + + def _execute(self, evt, func, args, kw): + """ Private implementation of the execute methods. + """ + # if reentering an empty pool, don't try to wait on a coroutine freeing + # itself -- instead, just execute in the current coroutine + if self.free() == 0 and api.getcurrent() in self._greenlets: + self._safe_apply(evt, func, args, kw) + else: + sender = self.get() + sender.send((evt, func, args, kw)) def create(self): """Private implementation of eventlet.pools.Pool @@ -275,9 +303,9 @@ class CoroutinePool(pools.Pool): new coroutine, to be executed. """ sender = event() - api.spawn(self._main_loop, sender) + self._greenlets.add(api.spawn(self._main_loop, sender)) return sender - + def execute(self, func, *args, **kw): """Execute func in one of the coroutines maintained by the pool, when one is free. 
@@ -291,9 +319,8 @@ class CoroutinePool(pools.Pool): >>> evt.wait() ('foo', 1) """ - sender = self.get() receiver = event() - sender.send((receiver, func, args, kw)) + self._execute(receiver, func, args, kw) return receiver def execute_async(self, func, *args, **kw): @@ -310,8 +337,7 @@ class CoroutinePool(pools.Pool): >>> api.sleep(0) foo 1 """ - sender = self.get() - sender.send((None, func, args, kw)) + self._execute(None, func, args, kw) class pipe(object): diff --git a/eventlet/coros_test.py b/eventlet/coros_test.py index 7a28d3e..30e6cfc 100644 --- a/eventlet/coros_test.py +++ b/eventlet/coros_test.py @@ -112,7 +112,7 @@ class TestCoroutinePool(tests.TestCase): mode = 'static' def setUp(self): # raise an exception if we're waiting forever - self._cancel_timeout = api.exc_after(1, RuntimeError()) + self._cancel_timeout = api.exc_after(1, api.TimeoutError) def tearDown(self): self._cancel_timeout.cancel() @@ -161,6 +161,24 @@ class TestCoroutinePool(tests.TestCase): t = worker.wait() api.sleep(0) self.assertEquals(t.cancelled, True) + + def test_reentrant(self): + pool = coros.CoroutinePool(0,1) + def reenter(): + waiter = pool.execute(lambda a: a, 'reenter') + self.assertEqual('reenter', waiter.wait()) + + outer_waiter = pool.execute(reenter) + outer_waiter.wait() + + evt = coros.event() + def reenter_async(): + pool.execute_async(lambda a: a, 'reenter') + evt.send('done') + + pool.execute_async(reenter_async) + evt.wait() + class IncrActor(coros.Actor): def received(self, message): @@ -229,10 +247,10 @@ class TestActor(tests.TestCase): if message == 'fail': raise RuntimeError() else: - print "appending" msgs.append(message) self.actor.received = received + self.actor.excepted = lambda x: None self.actor.cast('fail') api.sleep(0) From 646b130cf04057d60b468c26511ced7a1bade052 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 3 Mar 2008 17:14:38 -0500 Subject: [PATCH 76/79] [svn r99] Minor change to make doctest work on both 2.3 and 2.5. --- eventlet/coros.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/eventlet/coros.py b/eventlet/coros.py index cd05163..686f927 100644 --- a/eventlet/coros.py +++ b/eventlet/coros.py @@ -453,11 +453,12 @@ class Actor(object): ... else: ... print "received", message ... def excepted(self, exc): - ... print "excepted:", exc + ... # printing out exc varies per version of Python + ... print "excepted" >>> a = Exceptor() >>> a.cast('fail') >>> api.sleep(0) - excepted: + excepted The main purpose of excepted is to prevent the actor's coroutine from dying. From aca5baa34eb8c8d1462e1770dd9b27982b937730 Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Wed, 5 Mar 2008 04:26:30 -0500 Subject: [PATCH 77/79] [svn r100] Small tweaks to get saranwrap tests working on the Mac, mostly relating to changes in the behavior of copy and pickle modules between 2.3 and 2.5. It was bugging me. 
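For context, a usage sketch of the copy behavior these tweaks keep working (it mirrors test_copy from earlier in this series and assumes the saranwrap child process can be spawned and that the wrapped object is pickleable): copying a proxy serializes the remote object whole and hands back a plain local value.

import copy
from eventlet import saranwrap

prox = saranwrap.wrap({'a': [1, 2, 3]})   # proxy backed by a child python process
local = copy.deepcopy(prox)               # comes back as an ordinary dict, not a proxy
assert type(local) is dict
assert local == {'a': [1, 2, 3]}
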
--- eventlet/saranwrap.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/eventlet/saranwrap.py b/eventlet/saranwrap.py index b3d6744..43e5fe3 100644 --- a/eventlet/saranwrap.py +++ b/eventlet/saranwrap.py @@ -87,7 +87,7 @@ request class is basically an action and a map of parameters' """ import os -import cPickle +from cPickle import dumps, loads import struct import sys @@ -97,13 +97,14 @@ try: except NameError: from sets import Set as set, ImmutableSet as frozenset -from eventlet.processes import Process +from eventlet.processes import Process, DeadProcess from eventlet import api, pools # debugging hooks _g_debug_mode = False if _g_debug_mode: import traceback + import tempfile def pythonpath_sync(): """ @@ -121,7 +122,10 @@ def wrap(obj, dead_callback = None): if type(obj).__name__ == 'module': return wrap_module(obj.__name__, dead_callback) pythonpath_sync() - p = Process('python', [__file__, '--child'], dead_callback) + if _g_debug_mode: + p = Process('python', [__file__, '--child', '--logfile', os.path.join(tempfile.gettempdir(), 'saranwrap.log')], dead_callback) + else: + p = Process('python', [__file__, '--child'], dead_callback) prox = Proxy(ChildProcess(p, p)) prox.obj = obj return prox.obj @@ -134,7 +138,7 @@ def wrap_module(fqname, dead_callback = None): pythonpath_sync() global _g_debug_mode if _g_debug_mode: - p = Process('python', [__file__, '--module', fqname, '--logfile', '/tmp/saranwrap.log'], dead_callback) + p = Process('python', [__file__, '--module', fqname, '--logfile', os.path.join(tempfile.gettempdir(), 'saranwrap.log')], dead_callback) else: p = Process('python', [__file__, '--module', fqname,], dead_callback) prox = Proxy(ChildProcess(p,p)) @@ -187,8 +191,8 @@ def _read_response(id, attribute, input, cp): try: str = _read_lp_hunk(input) _prnt(`str`) - response = cPickle.loads(str) - except AttributeError, e: + response = loads(str) + except (AttributeError, DeadProcess), e: raise UnrecoverableError(e) _prnt("response: %s" % response) if response[0] == 'value': @@ -211,7 +215,7 @@ def _write_lp_hunk(stream, hunk): def _write_request(param, output): _prnt("request: %s" % param) - str = cPickle.dumps(param) + str = dumps(param) _write_lp_hunk(output, str) def _is_local(attribute): @@ -303,6 +307,10 @@ not supported, so you have to know what has been exported. # call base class getattribute so we actually get the local variable attribute = _unmunge_attr_name(attribute) return super(Proxy, self).__getattribute__(attribute) + elif attribute in ('__deepcopy__', '__copy__'): + # redirect copy function calls to our own versions instead of + # to the proxied object + return super(Proxy, self).__getattribute__('__deepcopy__') else: my_cp = self.__local_dict['_cp'] my_id = self.__local_dict['_id'] @@ -536,7 +544,7 @@ when the id is None.""" str = _read_lp_hunk(self._in) except EOFError: sys.exit(0) # normal exit - request = cPickle.loads(str) + request = loads(str) _log("request: %s (%s)" % (request, self._objects)) req = request id = None @@ -599,7 +607,7 @@ when the id is None.""" def respond(self, body): _log("responding with: %s" % body) #_log("objects: %s" % self._objects) - s = cPickle.dumps(body) + s = dumps(body) _log(`s`) str = _write_lp_hunk(self._out, s) From 5cbe01ad509acbfcbb42e509367395b0db57b22a Mon Sep 17 00:00:00 2001 From: "which.linden" Date: Mon, 10 Mar 2008 22:26:32 -0400 Subject: [PATCH 78/79] [svn r101] Actor is extended to be able to process multiple messages concurrently. The default is 1, naturally. 
The tests had to be changed substantially because the additional handoff associated with using the coroutine pool made single api.sleep(0) insufficient to force an execution of the received method. Also switched to a deque in pools.py since that maps more closely to the problem domain. Reviewed by jonathan and donovan. --- eventlet/coros.py | 79 ++++++++++++++-------------------------- eventlet/coros_test.py | 82 ++++++++++++++++++++++++++++++------------ eventlet/pools.py | 5 +-- 3 files changed, 89 insertions(+), 77 deletions(-) diff --git a/eventlet/coros.py b/eventlet/coros.py index 686f927..c53f807 100644 --- a/eventlet/coros.py +++ b/eventlet/coros.py @@ -279,6 +279,8 @@ class CoroutinePool(pools.Pool): print "GreenletExit raised in coroutine pool", e if evt is not None: evt.send(e) # sent as a return value, not an exception + except KeyboardInterrupt: + raise # allow program to exit except Exception, e: traceback.print_exc() if evt is not None: @@ -372,11 +374,17 @@ class Actor(object): coroutine exists; if you lose all references to the actor object it will never be freed. """ - def __init__(self): - """ Constructs an Actor, kicking off a new coroutine to process the messages. """ + def __init__(self, concurrency = 1): + """ Constructs an Actor, kicking off a new coroutine to process the messages. + + The concurrency argument specifies how many messages the actor will try + to process concurrently. If it is 1, the actor will process messages + serially. + """ self._mailbox = collections.deque() self._event = event() self._killer = api.spawn(self.run_forever) + self._pool = CoroutinePool(min_size=0, max_size=concurrency) def run_forever(self): """ Loops forever, continually checking the mailbox. """ @@ -388,17 +396,8 @@ class Actor(object): # leave the message in the mailbox until after it's # been processed so the event doesn't get triggered # while in the received method - try: - self.received(self._mailbox[0]) - except KeyboardInterrupt: - raise # allow the program to quit - except: - # we don't want to let the exception escape this - # loop because that would kill the coroutine - e = sys.exc_info()[0] - self.excepted(e) - sys.exc_clear() - + self._pool.execute_async( + self.received, self._mailbox[0]) self._mailbox.popleft() def cast(self, message): @@ -423,16 +422,24 @@ class Actor(object): replace it with something useful! >>> class Greeter(Actor): - ... def received(self, message): + ... def received(self, (message, evt) ): ... print "received", message + ... if evt: evt.send() ... >>> a = Greeter() - >>> a.cast("message 1") - >>> api.sleep(0) # need to explicitly yield to cause the actor to run + + This example uses events to synchronize between the actor and the main + coroutine in a predictable manner, but this kinda defeats the point of + the Actor, so don't do it in a real application. + + >>> evt = event() + >>> a.cast( ("message 1", evt) ) + >>> evt.wait() # force it to run at this exact moment received message 1 - >>> a.cast("message 2") - >>> a.cast("message 3") - >>> api.sleep(0) + >>> evt.reset() + >>> a.cast( ("message 2", None) ) + >>> a.cast( ("message 3", evt) ) + >>> evt.wait() received message 2 received message 3 @@ -440,40 +447,6 @@ class Actor(object): """ raise NotImplementedError() - def excepted(self, exc): - """ Called when the received method raises an exception. - - The default implementation simply prints out the raised exception. - Redefine it for customization. - - >>> class Exceptor(Actor): - ... def received(self, message): - ... 
if message == 'fail': - ... message + 1 - ... else: - ... print "received", message - ... def excepted(self, exc): - ... # printing out exc varies per version of Python - ... print "excepted" - >>> a = Exceptor() - >>> a.cast('fail') - >>> api.sleep(0) - excepted - - The main purpose of excepted is to prevent the actor's coroutine - from dying. - - >>> a.cast('message 2') - >>> api.sleep(0) - received message 2 - - If excepted() itself raises an exception, that will kill the coroutine. - - >>> api.kill(a._killer) # test cleanup - """ - print "Exception in %s.received(): %s" % ( - type(self).__name__, exc) - traceback.print_exc() def _test(): print "Running doctests. There will be no further output if they succeed." diff --git a/eventlet/coros_test.py b/eventlet/coros_test.py index 30e6cfc..a72bc7a 100644 --- a/eventlet/coros_test.py +++ b/eventlet/coros_test.py @@ -181,8 +181,9 @@ class TestCoroutinePool(tests.TestCase): class IncrActor(coros.Actor): - def received(self, message): + def received(self, evt): self.value = getattr(self, 'value', 0) + 1 + if evt: evt.send() class TestActor(tests.TestCase): mode = 'static' @@ -196,18 +197,23 @@ class TestActor(tests.TestCase): api.kill(self.actor._killer) def test_cast(self): - self.actor.cast(1) - api.sleep(0) + evt = coros.event() + self.actor.cast(evt) + evt.wait() + evt.reset() self.assertEqual(self.actor.value, 1) - self.actor.cast(1) - api.sleep(0) + self.actor.cast(evt) + evt.wait() self.assertEqual(self.actor.value, 2) def test_cast_multi_1(self): # make sure that both messages make it in there - self.actor.cast(1) - self.actor.cast(1) - api.sleep(0) + evt = coros.event() + evt1 = coros.event() + self.actor.cast(evt) + self.actor.cast(evt1) + evt.wait() + evt1.wait() self.assertEqual(self.actor.value, 2) def test_cast_multi_2(self): @@ -221,21 +227,24 @@ class TestActor(tests.TestCase): # yields, eventually all messages are delivered msgs = [] waiters = [] - def received(message): - evt = coros.event() - waiters.append(evt) + def received( (message, evt) ): api.sleep(0) msgs.append(message) evt.send() self.actor.received = received - self.actor.cast(1) + waiters.append(coros.event()) + self.actor.cast( (1, waiters[-1])) api.sleep(0) - self.actor.cast(2) - self.actor.cast(3) + waiters.append(coros.event()) + self.actor.cast( (2, waiters[-1]) ) + waiters.append(coros.event()) + self.actor.cast( (3, waiters[-1]) ) api.sleep(0) - self.actor.cast(4) - self.actor.cast(5) + waiters.append(coros.event()) + self.actor.cast( (4, waiters[-1]) ) + waiters.append(coros.event()) + self.actor.cast( (5, waiters[-1]) ) for evt in waiters: evt.wait() self.assertEqual(msgs, [1,2,3,4,5]) @@ -243,20 +252,49 @@ class TestActor(tests.TestCase): def test_raising_received(self): msgs = [] - def received(message): + def received( (message, evt) ): + evt.send() if message == 'fail': raise RuntimeError() else: msgs.append(message) self.actor.received = received - self.actor.excepted = lambda x: None - self.actor.cast('fail') - api.sleep(0) - self.actor.cast('should_appear') - api.sleep(0) + evt = coros.event() + self.actor.cast( ('fail', evt) ) + evt.wait() + evt.reset() + self.actor.cast( ('should_appear', evt) ) + evt.wait() self.assertEqual(['should_appear'], msgs) + def test_multiple(self): + self.actor = IncrActor(concurrency=2) + total = [0] + def received( (func, ev, value) ): + func() + total[0] += value + ev.send() + self.actor.received = received + + def onemoment(): + api.sleep(0.1) + + evt = coros.event() + evt1 = coros.event() + + self.actor.cast( 
(onemoment, evt, 1) ) + self.actor.cast( (lambda: None, evt1, 2) ) + + evt1.wait() + self.assertEqual(total[0], 2) + # both coroutines should have been used + self.assertEqual(self.actor._pool.current_size, 2) + self.assertEqual(self.actor._pool.free(), 1) + evt.wait() + self.assertEqual(total[0], 3) + self.assertEqual(self.actor._pool.free(), 2) + if __name__ == '__main__': tests.main() diff --git a/eventlet/pools.py b/eventlet/pools.py index abb16fc..5e89dc4 100644 --- a/eventlet/pools.py +++ b/eventlet/pools.py @@ -22,6 +22,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +import collections import os import socket @@ -57,7 +58,7 @@ class Pool(object): self.max_size = max_size self.current_size = 0 self.channel = channel.channel() - self.free_items = [] + self.free_items = collections.deque() for x in range(min_size): self.current_size += 1 self.free_items.append(self.create()) @@ -66,7 +67,7 @@ class Pool(object): """Return an item from the pool, when one is available """ if self.free_items: - return self.free_items.pop(0) + return self.free_items.popleft() if self.current_size < self.max_size: self.current_size += 1 return self.create() From a19d0686217839aa8d6a44d30ea861285589ab70 Mon Sep 17 00:00:00 2001 From: "seeping.blister" Date: Wed, 12 Mar 2008 00:50:17 -0400 Subject: [PATCH 79/79] [svn r102] added exception classes for all the HTTP error codes needed to support CHTTP's exception semantics. EXCEPT for code 413, which is still ambiguous enough that I wasn't sure what exactly to do. --- eventlet/httpc.py | 58 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/eventlet/httpc.py b/eventlet/httpc.py index 63c6caf..fd39ab3 100644 --- a/eventlet/httpc.py +++ b/eventlet/httpc.py @@ -23,6 +23,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +import copy import datetime import httplib import os.path @@ -220,9 +221,20 @@ class _LocalParams(_Params): setattr(self, k, v) def __getattr__(self, key): + if key == '__setstate__': return return getattr(self._delegate, key) - + def __reduce__(self): + params = copy.copy(self._delegate) + kwargs = copy.copy(self.__dict__) + assert(kwargs.has_key('_delegate')) + del kwargs['_delegate'] + if hasattr(params,'aux'): del params.aux + return (_LocalParams,(params,),kwargs) + + def __setitem__(self, k, item): + setattr(self, k, item) + class ConnectionError(Exception): """Detailed exception class for reporting on http connection problems. 
@@ -323,6 +335,14 @@ class TemporaryRedirect(Retriable): class BadRequest(ConnectionError): """ 400 Bad Request """ pass + +class Unauthorized(ConnectionError): + """ 401 Unauthorized """ + pass + +class PaymentRequired(ConnectionError): + """ 402 Payment Required """ + pass class Forbidden(ConnectionError): @@ -339,18 +359,43 @@ class Gone(ConnectionError): """ 410 Gone """ pass +class LengthRequired(ConnectionError): + """ 411 Length Required """ + pass + +class RequestURITooLong(ConnectionError): + """ 414 Request-URI Too Long """ + pass + +class UnsupportedMediaType(ConnectionError): + """ 415 Unsupported Media Type """ + pass + +class RequestedRangeNotSatisfiable(ConnectionError): + """ 416 Requested Range Not Satisfiable """ + pass + +class ExpectationFailed(ConnectionError): + """ 417 Expectation Failed """ + pass + +class NotImplemented(ConnectionError): + """ 501 Not Implemented """ + pass class ServiceUnavailable(Retriable): """ 503 Service Unavailable """ def url(self): return self.params._delegate.url - class GatewayTimeout(Retriable): """ 504 Gateway Timeout """ def url(self): return self.params._delegate.url +class HTTPVersionNotSupported(ConnectionError): + """ 505 HTTP Version Not Supported """ + pass class InternalServerError(ConnectionError): """ 500 Internal Server Error """ @@ -389,12 +434,21 @@ status_to_error_map = { 304: NotModified, 307: TemporaryRedirect, 400: BadRequest, + 401: Unauthorized, + 402: PaymentRequired, 403: Forbidden, 404: NotFound, 410: Gone, + 411: LengthRequired, + 414: RequestURITooLong, + 415: UnsupportedMediaType, + 416: RequestedRangeNotSatisfiable, + 417: ExpectationFailed, 500: InternalServerError, + 501: NotImplemented, 503: ServiceUnavailable, 504: GatewayTimeout, + 505: HTTPVersionNotSupported, } scheme_to_factory_map = {