python3 compat: print() function syntax

Author: Sergey Shepelev
Date: 2013-10-07 13:09:41 +04:00
parent 80633ab224
commit 35f600600c
48 changed files with 289 additions and 221 deletions
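
Every file touched by this commit follows the same handful of Python 3 compatibility patterns: the module gains from __future__ import print_function near the top, Python 2 print statements become print() calls, print >>sys.stderr becomes the file=sys.stderr keyword, and a trailing comma becomes end=' '. A minimal before/after sketch of those patterns (placeholder values and messages, not taken from any single file in the diff below):

# Python 2 statement forms replaced throughout this commit:
#   print "threading: %.02f seconds" % elapsed
#   print >>sys.stderr, "some warning"
#   print "echoed", line,        # trailing comma suppressed the newline
from __future__ import print_function
import sys

elapsed = 0.5        # placeholder values, only to make the sketch runnable
line = "hello"
print("threading: %.02f seconds" % elapsed)
print("some warning", file=sys.stderr)
print("echoed", line, end=' ')   # end=' ' replaces the Python 2 trailing comma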

View File

@@ -1,4 +1,5 @@
"""Test context switching performance of threading and eventlet"""
from __future__ import print_function
import threading
import time
@@ -10,6 +11,7 @@ from eventlet.hubs import pyevent, epolls, poll, selects
CONTEXT_SWITCHES = 100000
def run(event, wait_event):
counter = 0
while counter <= CONTEXT_SWITCHES:
@@ -18,6 +20,7 @@ def run(event, wait_event):
counter += 1
event.send()
def test_eventlet():
event1 = eventlet.event.Event()
event2 = eventlet.event.Event()
@@ -28,6 +31,7 @@ def test_eventlet():
thread1.wait()
thread2.wait()
class BenchThread(threading.Thread):
def __init__(self, event, wait_event):
threading.Thread.__init__(self)
@@ -42,8 +46,8 @@ class BenchThread(threading.Thread):
self.counter += 1
self.event.set()
def test_thread():
def test_thread():
event1 = threading.Event()
event2 = threading.Event()
event1.set()
@@ -54,39 +58,40 @@ def test_thread():
thread1.join()
thread2.join()
print "Testing with %d context switches" % CONTEXT_SWITCHES
print("Testing with %d context switches" % CONTEXT_SWITCHES)
start = time.time()
test_thread()
print "threading: %.02f seconds" % (time.time() - start)
print("threading: %.02f seconds" % (time.time() - start))
try:
hubs.use_hub(pyevent)
start = time.time()
test_eventlet()
print "pyevent: %.02f seconds" % (time.time() - start)
print("pyevent: %.02f seconds" % (time.time() - start))
except:
print "pyevent hub unavailable"
print("pyevent hub unavailable")
try:
hubs.use_hub(epolls)
start = time.time()
test_eventlet()
print "epoll: %.02f seconds" % (time.time() - start)
print("epoll: %.02f seconds" % (time.time() - start))
except:
print "epoll hub unavailable"
print("epoll hub unavailable")
try:
hubs.use_hub(poll)
start = time.time()
test_eventlet()
print "poll: %.02f seconds" % (time.time() - start)
print("poll: %.02f seconds" % (time.time() - start))
except:
print "poll hub unavailable"
print("poll hub unavailable")
try:
hubs.use_hub(selects)
start = time.time()
test_eventlet()
print "select: %.02f seconds" % (time.time() - start)
print("select: %.02f seconds" % (time.time() - start))
except:
print "select hub unavailable"
print("select hub unavailable")

View File

@@ -1,4 +1,5 @@
#! /usr/bin/env python
from __future__ import print_function
# test timer adds & expires on hubs.hub.BaseHub
@@ -38,4 +39,4 @@ hub.prepare_timers()
end = time.time()
print "Duration: %f" % (end-start,)
print("Duration: %f" % (end-start,))

View File

@@ -1,19 +1,24 @@
"""Benchmark evaluating eventlet's performance at speaking to itself over a localhost socket."""
from __future__ import print_function
import time
import benchmarks
BYTES=1000
SIZE=1
CONCURRENCY=50
TRIES=5
def reader(sock):
expect = BYTES
while expect > 0:
d = sock.recv(min(expect, SIZE))
expect -= len(d)
def writer(addr, socket_impl):
sock = socket_impl(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(addr)
@@ -22,13 +27,14 @@ def writer(addr, socket_impl):
d = 'xy' * (max(min(SIZE/2, BYTES-sent), 1))
sock.sendall(d)
sent += len(d)
def green_accepter(server_sock, pool):
for i in xrange(CONCURRENCY):
sock, addr = server_sock.accept()
pool.spawn_n(reader, sock)
def heavy_accepter(server_sock, pool):
for i in xrange(CONCURRENCY):
sock, addr = server_sock.accept()
@@ -36,12 +42,14 @@ def heavy_accepter(server_sock, pool):
t.start()
pool.append(t)
import eventlet.green.socket
import eventlet
from eventlet import debug
debug.hub_exceptions(True)
def launch_green_threads():
pool = eventlet.GreenPool(CONCURRENCY * 2 + 1)
server_sock = eventlet.green.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
@@ -52,10 +60,12 @@ def launch_green_threads():
for i in xrange(CONCURRENCY):
pool.spawn_n(writer, addr, eventlet.green.socket.socket)
pool.waitall()
import threading
import socket
import socket
def launch_heavy_threads():
threads = []
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
@@ -77,29 +87,29 @@ if __name__ == "__main__":
import optparse
parser = optparse.OptionParser()
parser.add_option('--compare-threading', action='store_true', dest='threading', default=False)
parser.add_option('-b', '--bytes', type='int', dest='bytes',
parser.add_option('-b', '--bytes', type='int', dest='bytes',
default=BYTES)
parser.add_option('-s', '--size', type='int', dest='size',
parser.add_option('-s', '--size', type='int', dest='size',
default=SIZE)
parser.add_option('-c', '--concurrency', type='int', dest='concurrency',
parser.add_option('-c', '--concurrency', type='int', dest='concurrency',
default=CONCURRENCY)
parser.add_option('-t', '--tries', type='int', dest='tries',
parser.add_option('-t', '--tries', type='int', dest='tries',
default=TRIES)
opts, args = parser.parse_args()
BYTES=opts.bytes
SIZE=opts.size
CONCURRENCY=opts.concurrency
TRIES=opts.tries
funcs = [launch_green_threads]
if opts.threading:
funcs = [launch_green_threads, launch_heavy_threads]
results = benchmarks.measure_best(TRIES, 3,
lambda: None, lambda: None,
*funcs)
print "green:", results[launch_green_threads]
print("green:", results[launch_green_threads])
if opts.threading:
print "threads:", results[launch_heavy_threads]
print "%", (results[launch_green_threads]-results[launch_heavy_threads])/results[launch_heavy_threads] * 100
print("threads:", results[launch_heavy_threads])
print("%", (results[launch_green_threads]-results[launch_heavy_threads])/results[launch_heavy_threads] * 100)

View File

@@ -1,34 +1,41 @@
"""Compare spawn to spawn_n"""
from __future__ import print_function
import eventlet
import benchmarks
def cleanup():
eventlet.sleep(0.2)
iters = 10000
best = benchmarks.measure_best(5, iters,
'pass',
cleanup,
eventlet.sleep)
print "eventlet.sleep (main)", best[eventlet.sleep]
print("eventlet.sleep (main)", best[eventlet.sleep])
gt = eventlet.spawn(benchmarks.measure_best,5, iters,
'pass',
cleanup,
eventlet.sleep)
best = gt.wait()
print "eventlet.sleep (gt)", best[eventlet.sleep]
print("eventlet.sleep (gt)", best[eventlet.sleep])
def dummy(i=None):
return i
def run_spawn():
eventlet.spawn(dummy, 1)
def run_spawn_n():
eventlet.spawn_n(dummy, 1)
def run_spawn_n_kw():
eventlet.spawn_n(dummy, i=1)
@@ -36,35 +43,40 @@ def run_spawn_n_kw():
best = benchmarks.measure_best(5, iters,
'pass',
cleanup,
run_spawn_n,
run_spawn_n,
run_spawn,
run_spawn_n_kw)
print "eventlet.spawn", best[run_spawn]
print "eventlet.spawn_n", best[run_spawn_n]
print "eventlet.spawn_n(**kw)", best[run_spawn_n_kw]
print "%% %0.1f" % ((best[run_spawn]-best[run_spawn_n])/best[run_spawn_n] * 100)
print("eventlet.spawn", best[run_spawn])
print("eventlet.spawn_n", best[run_spawn_n])
print("eventlet.spawn_n(**kw)", best[run_spawn_n_kw])
print("%% %0.1f" % ((best[run_spawn]-best[run_spawn_n])/best[run_spawn_n] * 100))
pool = None
def setup():
global pool
pool = eventlet.GreenPool(iters)
def run_pool_spawn():
pool.spawn(dummy, 1)
def run_pool_spawn_n():
pool.spawn_n(dummy, 1)
def cleanup_pool():
pool.waitall()
best = benchmarks.measure_best(3, iters,
setup,
cleanup_pool,
run_pool_spawn,
run_pool_spawn,
run_pool_spawn_n,
)
print "eventlet.GreenPool.spawn", best[run_pool_spawn]
print "eventlet.GreenPool.spawn_n", best[run_pool_spawn_n]
print "%% %0.1f" % ((best[run_pool_spawn]-best[run_pool_spawn_n])/best[run_pool_spawn_n] * 100)
print("eventlet.GreenPool.spawn", best[run_pool_spawn])
print("eventlet.GreenPool.spawn_n", best[run_pool_spawn_n])
print("%% %0.1f" % ((best[run_pool_spawn]-best[run_pool_spawn_n])/best[run_pool_spawn_n] * 100))

View File

@@ -1,3 +1,5 @@
from __future__ import print_function
import socket
import sys
import errno
@@ -21,10 +23,10 @@ class FileProxy(object):
def __init__(self, f):
self.f = f
def isatty(self):
def isatty(self):
return True
def flush(self):
def flush(self):
pass
def write(self, *a, **kw):
@@ -67,7 +69,7 @@ class SocketConsole(greenlets.greenlet):
def finalize(self):
# restore the state of the socket
self.desc = None
print "backdoor closed to %s:%s" % self.hostport
print("backdoor closed to %s:%s" % self.hostport)
def backdoor_server(sock, locals=None):
@@ -79,7 +81,7 @@ def backdoor_server(sock, locals=None):
of the interpreters. It can be convenient to stick important application
variables in here.
"""
print "backdoor server listening on %s:%s" % sock.getsockname()
print("backdoor server listening on %s:%s" % sock.getsockname())
try:
try:
while True:
@@ -100,7 +102,7 @@ def backdoor((conn, addr), locals=None):
(such as backdoor_server).
"""
host, port = addr
print "backdoor to %s:%s" % (host, port)
print("backdoor to %s:%s" % (host, port))
fl = conn.makefile("rw")
console = SocketConsole(fl, (host, port), locals)
hub = hubs.get_hub()

View File

@@ -1,3 +1,5 @@
from __future__ import print_function
import collections
import traceback
import warnings
@@ -8,12 +10,14 @@ from eventlet import hubs
from eventlet import greenthread
from eventlet import semaphore as semaphoremod
class NOT_USED:
def __repr__(self):
return 'NOT_USED'
NOT_USED = NOT_USED()
def Event(*a, **kw):
warnings.warn("The Event class has been moved to the event module! "
"Please construct event.Event objects instead.",
@@ -34,12 +38,14 @@ def Semaphore(count):
DeprecationWarning, stacklevel=2)
return semaphoremod.Semaphore(count)
def BoundedSemaphore(count):
warnings.warn("The BoundedSemaphore class has moved! Please "
"use semaphore.BoundedSemaphore instead.",
DeprecationWarning, stacklevel=2)
return semaphoremod.BoundedSemaphore(count)
def semaphore(count=0, limit=None):
warnings.warn("coros.semaphore is deprecated. Please use either "
"semaphore.Semaphore or semaphore.BoundedSemaphore instead.",
@@ -58,7 +64,7 @@ class metaphore(object):
>>> count = coros.metaphore()
>>> count.wait()
>>> def decrementer(count, id):
... print "%s decrementing" % id
... print("{0} decrementing".format(id))
... count.dec()
...
>>> _ = eventlet.spawn(decrementer, count, 'A')
@@ -106,6 +112,7 @@ class metaphore(object):
"""
self.event.wait()
def execute(func, *args, **kw):
""" Executes an operation asynchronously in a new coroutine, returning
an event to retrieve the return value.

View File

@@ -1,3 +1,5 @@
from __future__ import print_function
from collections import deque
import sys
import time
@@ -158,7 +160,7 @@ class BaseConnectionPool(Pool):
pass # conn is None, or junk
except:
if not quiet:
print "Connection.close raised: %s" % (sys.exc_info()[1])
print("Connection.close raised: %s" % (sys.exc_info()[1]))
def get(self):
conn = super(BaseConnectionPool, self).get()
@@ -210,7 +212,7 @@ class BaseConnectionPool(Pool):
except:
# we don't care what the exception was, we just know the
# connection is dead
print "WARNING: connection.rollback raised: %s" % (sys.exc_info()[1])
print("WARNING: connection.rollback raised: %s" % (sys.exc_info()[1]))
conn = None
if conn is not None:

View File

@@ -1,5 +1,6 @@
"""The debug module contains utilities and functions for better
debugging Eventlet-powered applications."""
from __future__ import print_function
import os
import sys
@@ -40,7 +41,7 @@ class Spew(object):
line = 'Unknown code named [%s]. VM instruction #%d' % (
frame.f_code.co_name, frame.f_lasti)
if self.trace_names is None or name in self.trace_names:
print '%s:%s: %s' % (name, lineno, line.rstrip())
print('%s:%s: %s' % (name, lineno, line.rstrip()))
if not self.show_values:
return self
details = []
@@ -51,7 +52,7 @@ class Spew(object):
if tok in frame.f_locals:
details.append('%s=%r' % (tok, frame.f_locals[tok]))
if details:
print "\t%s" % ' '.join(details)
print("\t%s" % ' '.join(details))
return self

View File

@@ -1,14 +1,18 @@
from __future__ import print_function
from eventlet import hubs
from eventlet.support import greenlets as greenlet
__all__ = ['Event']
class NOT_USED:
def __repr__(self):
return 'NOT_USED'
NOT_USED = NOT_USED()
class Event(object):
"""An abstraction where an arbitrary number of coroutines
can wait for one event from another.
@@ -97,7 +101,7 @@ class Event(object):
>>> evt = event.Event()
>>> def wait_on():
... retval = evt.wait()
... print "waited for", retval
... print("waited for", retval)
>>> _ = eventlet.spawn(wait_on)
>>> evt.send('result')
>>> eventlet.sleep(0)
@@ -128,9 +132,9 @@ class Event(object):
>>> import eventlet
>>> evt = event.Event()
>>> def waiter():
... print 'about to wait'
... print('about to wait')
... result = evt.wait()
... print 'waited for', result
... print('waited for', result)
>>> _ = eventlet.spawn(waiter)
>>> eventlet.sleep(0)
about to wait

View File

@@ -1,3 +1,5 @@
from __future__ import print_function
from eventlet import coros, proc, api
from eventlet.semaphore import Semaphore
@@ -26,7 +28,7 @@ class Pool(object):
discarded. The return value of :meth:`free` will be negative in this
situation.
"""
max_size_delta = new_max_size - self.max_size
max_size_delta = new_max_size - self.max_size
self.sem.counter += max_size_delta
self.max_size = new_max_size
@@ -77,7 +79,7 @@ class Pool(object):
return p
def waitall(self):
""" Calling this function blocks until every coroutine
""" Calling this function blocks until every coroutine
completes its work (i.e. there are 0 running coroutines)."""
return self.procs.waitall()
@@ -87,7 +89,7 @@ class Pool(object):
"""Wait for the next execute in the pool to complete,
and return the result."""
return self.results.wait()
def waiting(self):
"""Return the number of coroutines waiting to execute.
"""
@@ -110,7 +112,7 @@ class Pool(object):
>>> pool = Pool()
>>> def saw(x):
... print "I saw %s!" % x
... print("I saw %s!" % x)
...
>>> pool.launch_all(saw, "ABC")
>>> pool.wait_all()
@@ -130,7 +132,7 @@ class Pool(object):
>>> from eventlet import coros
>>> pool = coros.CoroutinePool()
>>> def saw(x): print "I saw %s!" % x
>>> def saw(x): print("I saw %s!" % x)
...
>>> pool.process_all(saw, "DEF")
I saw D!
@@ -190,11 +192,11 @@ class Pool(object):
>>> pool = coros.CoroutinePool(max_size=5)
>>> pausers = [coros.Event() for x in xrange(2)]
>>> def longtask(evt, desc):
... print "%s woke up with %s" % (desc, evt.wait())
... print("%s woke up with %s" % (desc, evt.wait()))
...
>>> pool.launch_all(longtask, zip(pausers, "AB"))
>>> def quicktask(desc):
... print "returning %s" % desc
... print("returning %s" % desc)
... return desc
...
@@ -202,39 +204,39 @@ class Pool(object):
items individually to illustrate timing)
>>> step = iter(pool.generate_results(quicktask, string.ascii_lowercase))
>>> print step.next()
>>> print(step.next())
returning a
returning b
returning c
a
>>> print step.next()
>>> print(step.next())
b
>>> print step.next()
>>> print(step.next())
c
>>> print step.next()
>>> print(step.next())
returning d
returning e
returning f
d
>>> pausers[0].send("A")
>>> print step.next()
>>> print(step.next())
e
>>> print step.next()
>>> print(step.next())
f
>>> print step.next()
>>> print(step.next())
A woke up with A
returning g
returning h
returning i
g
>>> print "".join([step.next() for x in xrange(3)])
>>> print("".join([step.next() for x in xrange(3)]))
returning j
returning k
returning l
returning m
hij
>>> pausers[1].send("B")
>>> print "".join([step.next() for x in xrange(4)])
>>> print("".join([step.next() for x in xrange(4)]))
B woke up with B
returning n
returning o
@@ -314,3 +316,4 @@ class Pool(object):
yield q.wait()
finished += 1

View File

@@ -1,3 +1,5 @@
from __future__ import print_function
import collections
from eventlet import queue
@@ -15,8 +17,8 @@ def item_impl(self):
>>> from eventlet import pools
>>> pool = pools.TokenPool(max_size=4)
>>> with pool.item() as obj:
... print "got token"
...
... print("got token")
...
got token
>>> pool.free()
4
@@ -194,3 +196,4 @@ class TokenPool(Pool):
"""
def create(self):
return Token()

View File

@@ -38,7 +38,7 @@ case the notification is performed immediatelly:
>>> try:
... p.link()
... except LinkedCompleted:
... print 'LinkedCompleted'
... print('LinkedCompleted')
LinkedCompleted
(Without an argument, the link is created to the current greenlet)
@@ -55,7 +55,7 @@ must fail as well; :meth:`~eventlet.proc.Source.link_exception` is useful here:
>>> try:
... api.sleep(1)
... except LinkedFailed:
... print 'LinkedFailed'
... print('LinkedFailed')
LinkedFailed
One application of linking is :func:`waitall` function: link to a bunch of
@@ -722,3 +722,4 @@ class Pool(object):
g.link(lambda *_args: self.semaphore.release())
return g

View File

@@ -1,17 +1,17 @@
# Copyright (c) 2009 Denis Bilenko, denis.bilenko at gmail com
# Copyright (c) 2010 Eventlet Contributors (see AUTHORS)
# and licensed under the MIT license:
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
@@ -22,23 +22,24 @@
"""Synchronized queues.
The :mod:`eventlet.queue` module implements multi-producer, multi-consumer
queues that work across greenlets, with the API similar to the classes found in
the standard :mod:`Queue` and :class:`multiprocessing <multiprocessing.Queue>`
The :mod:`eventlet.queue` module implements multi-producer, multi-consumer
queues that work across greenlets, with the API similar to the classes found in
the standard :mod:`Queue` and :class:`multiprocessing <multiprocessing.Queue>`
modules.
A major difference is that queues in this module operate as channels when
initialized with *maxsize* of zero. In such case, both :meth:`Queue.empty`
and :meth:`Queue.full` return ``True`` and :meth:`Queue.put` always blocks until
and :meth:`Queue.full` return ``True`` and :meth:`Queue.put` always blocks until
a call to :meth:`Queue.get` retrieves the item.
An interesting difference, made possible because of greenthreads, is
that :meth:`Queue.qsize`, :meth:`Queue.empty`, and :meth:`Queue.full` *can* be
used as indicators of whether the subsequent :meth:`Queue.get`
or :meth:`Queue.put` will not block. The new methods :meth:`Queue.getting`
and :meth:`Queue.putting` report on the number of greenthreads blocking
An interesting difference, made possible because of greenthreads, is
that :meth:`Queue.qsize`, :meth:`Queue.empty`, and :meth:`Queue.full` *can* be
used as indicators of whether the subsequent :meth:`Queue.get`
or :meth:`Queue.put` will not block. The new methods :meth:`Queue.getting`
and :meth:`Queue.putting` report on the number of greenthreads blocking
in :meth:`put <Queue.put>` or :meth:`get <Queue.get>` respectively.
"""
from __future__ import print_function
import sys
import heapq
@@ -81,7 +82,7 @@ class Waiter(object):
def __str__(self):
"""
>>> print Waiter()
>>> print(Waiter())
<Waiter greenlet=None>
"""
if self.waiting:
@@ -133,8 +134,8 @@ class Waiter(object):
class LightQueue(object):
"""
This is a variant of Queue that behaves mostly like the standard
:class:`Queue`. It differs by not supporting the
This is a variant of Queue that behaves mostly like the standard
:class:`Queue`. It differs by not supporting the
:meth:`task_done <Queue.task_done>` or :meth:`join <Queue.join>` methods,
and is a little faster for not having that overhead.
"""
@@ -190,14 +191,14 @@ class LightQueue(object):
# Maybe wake some stuff up
self._schedule_unlock()
self.maxsize = size
def putting(self):
"""Returns the number of greenthreads that are blocked waiting to put
items into the queue."""
return len(self.putters)
def getting(self):
"""Returns the number of greenthreads that are blocked waiting on an
"""Returns the number of greenthreads that are blocked waiting on an
empty queue."""
return len(self.getters)
@@ -357,17 +358,17 @@ class ItemWaiter(Waiter):
def __init__(self, item):
Waiter.__init__(self)
self.item = item
class Queue(LightQueue):
'''Create a queue object with a given maximum size.
If *maxsize* is less than zero or ``None``, the queue size is infinite.
``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks
until the item is delivered. (This is unlike the standard :class:`Queue`,
``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks
until the item is delivered. (This is unlike the standard :class:`Queue`,
where 0 means infinite size).
In all other respects, this Queue class resembled the standard library,
:class:`Queue`.
'''
@@ -402,7 +403,7 @@ class Queue(LightQueue):
Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
'''
if self.unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self.unfinished_tasks -= 1
@@ -450,3 +451,4 @@ class LifoQueue(Queue):
def _get(self):
return self.queue.pop()

View File

@@ -56,23 +56,23 @@ if __name__=='__main__':
sys.exit('Supply number of test as an argument, 0, 1, 2 or 3')
from twisted.internet import reactor
def test():
print block_on(reactor.resolver.getHostByName('www.google.com'))
print block_on(reactor.resolver.getHostByName('###'))
print(block_on(reactor.resolver.getHostByName('www.google.com')))
print(block_on(reactor.resolver.getHostByName('###')))
if num==0:
test()
elif num==1:
spawn(test)
from eventlet.api import sleep
print 'sleeping..'
print('sleeping..')
sleep(5)
print 'done sleeping..'
print('done sleeping..')
elif num==2:
from eventlet.twistedutil import join_reactor
spawn(test)
reactor.run()
elif num==3:
from eventlet.twistedutil import join_reactor
print "fails because it's impossible to use block_on from the mainloop"
print("fails because it's impossible to use block_on from the mainloop")
reactor.callLater(0, test)
reactor.run()

View File

@@ -20,7 +20,7 @@ def g_log(*args):
ident = '%08X' % (g_id,)
else:
ident = 'greenlet-%d' % (g_id,)
print >>sys.stderr, '[%s] %s' % (ident, ' '.join(map(str, args)))
print('[%s] %s' % (ident, ' '.join(map(str, args))), file=sys.stderr)
__original_socket__ = socket.socket

View File

@@ -6,13 +6,13 @@ from zmq.devices import Device
if __name__ == "__main__":
usage = 'usage: chat_bridge sub_address pub_address'
if len (sys.argv) != 3:
print usage
print(usage)
sys.exit(1)
sub_addr = sys.argv[1]
pub_addr = sys.argv[2]
print "Recieving on %s" % sub_addr
print "Sending on %s" % pub_addr
print("Recieving on %s" % sub_addr)
print("Sending on %s" % pub_addr)
device = Device(FORWARDER, SUB, PUB)
device.bind_in(sub_addr)
device.setsockopt_in(SUBSCRIBE, "")

View File

@@ -7,7 +7,7 @@ participants = set()
def read_chat_forever(writer, reader):
line = reader.readline()
while line:
print "Chat:", line.strip()
print("Chat:", line.strip())
for p in participants:
try:
if p is not writer: # Don't echo
@@ -20,18 +20,18 @@ def read_chat_forever(writer, reader):
raise
line = reader.readline()
participants.remove(writer)
print "Participant left chat."
print("Participant left chat.")
try:
print "ChatServer starting up on port %s" % PORT
print("ChatServer starting up on port %s" % PORT)
server = eventlet.listen(('0.0.0.0', PORT))
while True:
new_connection, address = server.accept()
print "Participant joined chat."
print("Participant joined chat.")
new_writer = new_connection.makefile('w')
participants.add(new_writer)
eventlet.spawn_n(read_chat_forever,
new_writer,
new_connection.makefile('r'))
except (KeyboardInterrupt, SystemExit):
print "ChatServer exiting."
print("ChatServer exiting.")

View File

@@ -2,17 +2,21 @@
Demonstrates how to use the eventlet.green.socket module.
"""
from __future__ import print_function
import eventlet
from eventlet.green import socket
def geturl(url):
c = socket.socket()
ip = socket.gethostbyname(url)
c.connect((ip, 80))
print '%s connected' % url
print('%s connected' % url)
c.sendall('GET /\r\n\r\n')
return c.recv(1024)
urls = ['www.google.com', 'www.yandex.ru', 'www.python.org']
pile = eventlet.GreenPile()
for x in urls:
@@ -21,5 +25,4 @@ for x in urls:
# note that the pile acts as a collection of return values from the functions
# if any exceptions are raised by the function they'll get raised here
for url, result in zip(urls, pile):
print '%s: %s' % (url, repr(result)[:50])
print('%s: %s' % (url, repr(result)[:50]))

View File

@@ -98,7 +98,7 @@ port = None
if __name__ == "__main__":
usage = 'usage: websocket_chat -p pub address -s sub address port number'
if len (sys.argv) != 6:
print usage
print(usage)
sys.exit(1)
pub_addr = sys.argv[2]
@@ -106,22 +106,22 @@ if __name__ == "__main__":
try:
port = int(sys.argv[5])
except ValueError:
print "Error port supplied couldn't be converted to int\n", usage
print("Error port supplied couldn't be converted to int\n", usage)
sys.exit(1)
try:
pub_socket = ctx.socket(zmq.PUB)
pub_socket.connect(pub_addr)
print "Publishing to %s" % pub_addr
print("Publishing to %s" % pub_addr)
sub_socket = ctx.socket(zmq.SUB)
sub_socket.connect(sub_addr)
sub_socket.setsockopt(zmq.SUBSCRIBE, "")
print "Subscribing to %s" % sub_addr
print("Subscribing to %s" % sub_addr)
except:
print "Couldn't create sockets\n", usage
print("Couldn't create sockets\n", usage)
sys.exit(1)
spawn_n(subscribe_and_distribute, sub_socket)
listener = eventlet.listen(('127.0.0.1', port))
print "\nVisit http://localhost:%s/ in your websocket-capable browser.\n" % port
print("\nVisit http://localhost:%s/ in your websocket-capable browser.\n" % port)
wsgi.server(listener, dispatch)

View File

@@ -9,27 +9,28 @@ Connect to it with:
You terminate your connection by terminating telnet (typically Ctrl-]
and then 'quit')
"""
from __future__ import print_function
import eventlet
def handle(fd):
print "client connected"
print("client connected")
while True:
# pass through every non-eof line
x = fd.readline()
if not x: break
fd.write(x)
fd.flush()
print "echoed", x,
print "client disconnected"
print("echoed", x, end=' ')
print("client disconnected")
print "server socket listening on port 6000"
print("server socket listening on port 6000")
server = eventlet.listen(('0.0.0.0', 6000))
pool = eventlet.GreenPool()
while True:
try:
new_sock, address = server.accept()
print "accepted", address
print("accepted", address)
pool.spawn_n(handle, new_sock.makefile('rw'))
except (SystemExit, KeyboardInterrupt):
break

View File

@@ -22,4 +22,4 @@ http://ln.hixie.ch/rss/html
url = 'http://localhost:9010/'
result = urllib2.urlopen(url, big_list_of_feeds)
print result.read()
print(result.read())

View File

@@ -6,7 +6,7 @@ starting from a simple framework like this.
import eventlet
def closed_callback():
print "called back"
print("called back")
def forward(source, dest, cb = lambda: None):
"""Forwards bytes unidirectionally from source to dest"""

View File

@@ -21,7 +21,7 @@ url_regex = re.compile(r'\b(([\w-]+://?|www[.])[^\s()<>]+(?:\([\w\d]+\)|([^[:pun
def fetch(url, outq):
"""Fetch a url and push any urls found into a queue."""
print "fetching", url
print("fetching", url)
data = ''
with eventlet.Timeout(5, False):
data = urllib2.urlopen(url).read()
@@ -53,5 +53,5 @@ def producer(start_url):
seen = producer("http://eventlet.net")
print "I saw these urls:"
print "\n".join(seen)
print("I saw these urls:")
print("\n".join(seen))

View File

@@ -22,7 +22,7 @@ url_regex = re.compile(r'\b(([\w-]+://?|www[.])[^\s()<>]+(?:\([\w\d]+\)|([^[:pun
def fetch(url, seen, pool):
"""Fetch a url, stick any found urls into the seen set, and
dispatch any new ones to the pool."""
print "fetching", url
print("fetching", url)
data = ''
with eventlet.Timeout(5, False):
data = urllib2.urlopen(url).read()
@@ -45,5 +45,5 @@ def crawl(start_url):
return seen
seen = crawl("http://eventlet.net")
print "I saw these urls:"
print "\n".join(seen)
print("I saw these urls:")
print("\n".join(seen))

View File

@@ -13,14 +13,14 @@ from twisted.internet import reactor
conn = GreenClientCreator(reactor).connectTCP('www.google.com', 80)
conn.write('GET / HTTP/1.0\r\n\r\n')
conn.loseWriteConnection()
print conn.read()
print(conn.read())
# read from SSL connection line by line
conn = GreenClientCreator(reactor, LineOnlyReceiverTransport).connectSSL('sf.net', 443, ssl.ClientContextFactory())
conn.write('GET / HTTP/1.0\r\n\r\n')
try:
for num, line in enumerate(conn):
print '%3s %r' % (num, line)
print('%3s %r' % (num, line))
except ConnectionClosed as ex:
print ex
print(ex)

View File

@@ -48,7 +48,7 @@ def http_request(method, host, path, headers):
conn.request(method, path, headers=headers)
response = conn.getresponse()
body = response.read()
print method, host, path, response.status, response.reason, len(body)
print(method, host, path, response.status, response.reason, len(body))
return format_response(response, body)
def format_response(response, body):
@@ -64,6 +64,6 @@ def format_response(response, body):
class MyFactory(Factory):
protocol = LineOnlyReceiver
print __doc__
print(__doc__)
reactor.listenTCP(8888, MyFactory())
reactor.run()

View File

@@ -12,19 +12,19 @@ def forward(source, dest):
x = source.recv()
if not x:
break
print 'forwarding %s bytes' % len(x)
print('forwarding %s bytes' % len(x))
dest.write(x)
finally:
dest.loseConnection()
def handler(local):
client = str(local.getHost())
print 'accepted connection from %s' % client
print('accepted connection from %s' % client)
remote = GreenClientCreator(reactor, UnbufferedTransport).connectTCP(remote_host, remote_port)
a = proc.spawn(forward, remote, local)
b = proc.spawn(forward, local, remote)
proc.waitall([a, b], trap_errors=True)
print 'closed connection to %s' % client
print('closed connection to %s' % client)
try:
local_port, remote_host, remote_port = sys.argv[1:]

View File

@@ -16,25 +16,25 @@ class Chat:
def handler(self, conn):
peer = conn.getPeer()
print 'new connection from %s' % (peer, )
print('new connection from %s' % (peer, ))
conn.write("Welcome! There're %s participants already\n" % (len(self.participants)))
self.participants.append(conn)
try:
for line in conn:
if line:
print 'received from %s: %s' % (peer, line)
print('received from %s: %s' % (peer, line))
for buddy in self.participants:
if buddy is not conn:
buddy.sendline('from %s: %s' % (peer, line))
except Exception as ex:
print peer, ex
print(peer, ex)
else:
print peer, 'connection done'
print(peer, 'connection done')
finally:
conn.loseConnection()
self.participants.remove(conn)
print __doc__
print(__doc__)
chat = Chat()
from twisted.internet import reactor
reactor.listenTCP(8007, SpawnFactory(chat.handler, LineOnlyReceiverTransport))

View File

@@ -9,7 +9,7 @@ class NoisySRVConnector(SRVConnector):
def pickServer(self):
host, port = SRVConnector.pickServer(self)
print 'Resolved _%s._%s.%s --> %s:%s' % (self.service, self.protocol, self.domain, host, port)
print('Resolved _%s._%s.%s --> %s:%s' % (self.service, self.protocol, self.domain, host, port))
return host, port
cred = X509Credentials(None, None)
@@ -24,10 +24,10 @@ From-Path: msrps://alice.example.com:9892/98cjs;tcp
-------49fh$
""".replace('\n', '\r\n')
print 'Sending:\n%s' % request
print('Sending:\n%s' % request)
conn.write(request)
print 'Received:'
print('Received:')
for x in conn:
print repr(x)
print(repr(x))
if '-------' in x:
break

View File

@@ -10,7 +10,7 @@ from eventlet.twistedutil import join_reactor
class LineOnlyReceiver(basic.LineOnlyReceiver):
def lineReceived(self, line):
print 'received: %r' % line
print('received: %r' % line)
if not line:
return
app, context, node = (line + ' ').split(' ', 3)

View File

@@ -19,12 +19,12 @@ urls = [
def fetch(url):
print "opening", url
print("opening", url)
body = urllib2.urlopen(url).read()
print "done with", url
print("done with", url)
return url, body
pool = eventlet.GreenPool(200)
for url, body in pool.imap(fetch, urls):
print "got body from", url, "of length", len(body)
print("got body from", url, "of length", len(body))

View File

@@ -36,5 +36,5 @@ def dispatch(environ, start_response):
if __name__ == "__main__":
# run an example app from the command line
listener = eventlet.listen(('127.0.0.1', 7000))
print "\nVisit http://localhost:7000/ in your websocket-capable browser.\n"
print("\nVisit http://localhost:7000/ in your websocket-capable browser.\n")
wsgi.server(listener, dispatch)

View File

@@ -33,5 +33,5 @@ def dispatch(environ, start_response):
if __name__ == "__main__":
# run an example app from the command line
listener = eventlet.listen(('127.0.0.1', PORT))
print "\nVisit http://localhost:7000/ in your websocket-capable browser.\n"
print("\nVisit http://localhost:7000/ in your websocket-capable browser.\n")
wsgi.server(listener, dispatch)

View File

@@ -9,7 +9,7 @@ ctx = zmq.Context()
def publish(writer):
print "connected"
print("connected")
socket = ctx.socket(zmq.SUB)
socket.setsockopt(zmq.SUBSCRIBE, "")
@@ -30,7 +30,7 @@ def read_chat_forever(reader, pub_socket):
line = reader.readline()
who = 'someone'
while line:
print "Chat:", line.strip()
print("Chat:", line.strip())
if line.startswith('name:'):
who = line.split(':')[-1].strip()
@@ -42,10 +42,10 @@ def read_chat_forever(reader, pub_socket):
if e[0] != 32:
raise
line = reader.readline()
print "Participant left chat."
print("Participant left chat.")
try:
print "ChatServer starting up on port %s" % PORT
print("ChatServer starting up on port %s" % PORT)
server = eventlet.listen(('0.0.0.0', PORT))
pub_socket = ctx.socket(zmq.PUB)
pub_socket.bind(ADDR)
@@ -54,11 +54,11 @@ try:
while True:
new_connection, address = server.accept()
print "Participant joined chat."
print("Participant joined chat.")
eventlet.spawn_n(publish,
new_connection.makefile('w'))
eventlet.spawn_n(read_chat_forever,
new_connection.makefile('r'),
pub_socket)
except (KeyboardInterrupt, SystemExit):
print "ChatServer exiting."
print("ChatServer exiting.")

View File

@@ -4,24 +4,24 @@ import eventlet
CTX = zmq.Context(1)
def bob_client(ctx, count):
print "STARTING BOB"
print("STARTING BOB")
bob = zmq.Socket(CTX, zmq.REQ)
bob.connect("ipc:///tmp/test")
for i in range(0, count):
print "BOB SENDING"
print("BOB SENDING")
bob.send("HI")
print "BOB GOT:", bob.recv()
print("BOB GOT:", bob.recv())
def alice_server(ctx, count):
print "STARTING ALICE"
print("STARTING ALICE")
alice = zmq.Socket(CTX, zmq.REP)
alice.bind("ipc:///tmp/test")
print "ALICE READY"
print("ALICE READY")
for i in range(0, count):
print "ALICE GOT:", alice.recv()
print "ALIC SENDING"
print("ALICE GOT:", alice.recv())
print("ALIC SENDING")
alice.send("HI BACK")
alice = eventlet.spawn(alice_server, CTX, 10)

View File

@@ -1,4 +1,6 @@
# package is named tests, not test, so it won't be confused with test in stdlib
from __future__ import print_function
import errno
import os
try:
@@ -34,7 +36,7 @@ def skipped(func):
except ImportError:
# no nose, we'll just skip the test ourselves
def skipme(*a, **k):
print "Skipping", func.__name__
print(("Skipping {0}".format(func.__name__)))
skipme.__name__ = func.__name__
return skipme
@@ -264,7 +266,7 @@ def get_database_auth():
try:
import simplejson as json
except ImportError:
print "No json implementation, using baked-in db credentials."
print("No json implementation, using baked-in db credentials.")
return retval
if 'EVENTLET_DB_TEST_AUTH' in os.environ:

View File

@@ -4,6 +4,7 @@ from eventlet.green import socket
from tests import LimitedTestCase, main
class BackdoorTest(LimitedTestCase):
def test_server(self):
listener = socket.socket()
@@ -27,8 +28,7 @@ class BackdoorTest(LimitedTestCase):
serv.kill()
# wait for the console to discover that it's dead
eventlet.sleep(0.1)
if __name__ == '__main__':
main()

View File

@@ -1,6 +1,6 @@
'''Test cases for db_pool
'''
from __future__ import with_statement
from __future__ import print_function
import sys
import os
@@ -430,8 +430,8 @@ class DBConnectionPool(DBTester):
end = time.time()
results.append(end-start)
print "\n%u iterations took an average of %f seconds, (%s) in %s\n" % (
iterations, sum(results)/len(results), results, type(self))
print("\n%u iterations took an average of %f seconds, (%s) in %s\n" % (
iterations, sum(results)/len(results), results, type(self)))
def test_raising_create(self):
# if the create() method raises an exception the pool should
@@ -515,12 +515,12 @@ def mysql_requirement(_f):
return True
except MySQLdb.OperationalError:
if verbose:
print >> sys.stderr, ">> Skipping mysql tests, error when connecting:"
print(">> Skipping mysql tests, error when connecting:", file=sys.stderr)
traceback.print_exc()
return False
except ImportError:
if verbose:
print >> sys.stderr, ">> Skipping mysql tests, MySQLdb not importable"
print(">> Skipping mysql tests, MySQLdb not importable", file=sys.stderr)
return False
@@ -584,10 +584,10 @@ def postgres_requirement(_f):
psycopg2.connect(**auth)
return True
except psycopg2.OperationalError:
print "Skipping postgres tests, error when connecting"
print("Skipping postgres tests, error when connecting")
return False
except ImportError:
print "Skipping postgres tests, psycopg2 not importable"
print("Skipping postgres tests, psycopg2 not importable")
return False

View File

@@ -394,6 +394,8 @@ class TestDefaultHub(ProcessBase):
# https://github.com/eventlet/eventlet/issues/38
# get_hub on windows broken by kqueue
module_source = r'''
from __future__ import print_function
# Simulate absence of kqueue even on platforms that support it.
import select
try:

View File

@@ -1,3 +1,5 @@
from __future__ import print_function
import os
import sys
import time
@@ -10,28 +12,30 @@ try:
except ImportError:
MySQLdb = False
def mysql_requirement(_f):
"""We want to skip tests if using pyevent, MySQLdb is not installed, or if
there is no database running on the localhost that the auth file grants
us access to.
This errs on the side of skipping tests if everything is not right, but
it's better than a million tests failing when you don't care about mysql
support."""
if using_pyevent(_f):
return False
if MySQLdb is False:
print "Skipping mysql tests, MySQLdb not importable"
print("Skipping mysql tests, MySQLdb not importable")
return False
try:
auth = get_database_auth()['MySQLdb'].copy()
MySQLdb.connect(**auth)
return True
except MySQLdb.OperationalError:
print "Skipping mysql tests, error when connecting:"
print("Skipping mysql tests, error when connecting:")
traceback.print_exc()
return False
class MySQLdbTester(LimitedTestCase):
def setUp(self):
super(MySQLdbTester, self).setUp()
@@ -55,7 +59,7 @@ class MySQLdbTester(LimitedTestCase):
super(MySQLdbTester, self).tearDown()
@skip_unless(mysql_requirement)
@skip_unless(mysql_requirement)
def create_db(self):
auth = self._auth.copy()
try:
@@ -122,7 +126,7 @@ class MySQLdbTester(LimitedTestCase):
rows = cursor.fetchall()
self.assertEqual(rows, ((1L,),))
self.assert_cursor_yields(cursor)
def assert_connection_works(self, conn):
curs = conn.cursor()
self.assert_cursor_works(curs)
@@ -136,7 +140,7 @@ class MySQLdbTester(LimitedTestCase):
def test_connecting(self):
self.assert_(self.connection is not None)
def test_connecting_annoyingly(self):
self.assert_connection_works(MySQLdb.Connect(**self._auth))
self.assert_connection_works(MySQLdb.Connection(**self._auth))
@@ -214,10 +218,11 @@ class MySQLdbTester(LimitedTestCase):
curs.execute("delete from gargleblatz where a=314159")
conn.commit()
from tests import patcher_test
class MonkeyPatchTester(patcher_test.ProcessBase):
@skip_unless(mysql_requirement)
@skip_unless(mysql_requirement)
def test_monkey_patching(self):
output, lines = self.run_script("""
from eventlet import patcher

View File

@@ -88,10 +88,10 @@ def main(db):
except Exception:
parse_error += 1
sys.stderr.write('Failed to parse id=%s\n' % id)
print repr(stdout)
print(repr(stdout))
traceback.print_exc()
else:
print id, hub, testname, runs, errors, fails, timeouts
print(id, hub, testname, runs, errors, fails, timeouts)
c.execute('insert into parsed_command_record '
'(id, testname, hub, runs, errors, fails, timeouts) '
'values (?, ?, ?, ?, ?, ?, ?)',
@@ -101,7 +101,7 @@ def main(db):
if __name__=='__main__':
if not sys.argv[1:]:
latest_db = sorted(glob.glob('results.*.db'), key=lambda f: os.stat(f).st_mtime)[-1]
print latest_db
print(latest_db)
sys.argv.append(latest_db)
for db in sys.argv[1:]:
main(db)

View File

@@ -49,7 +49,7 @@ class PatchingPsycopg(patcher_test.ProcessBase):
self.write_to_tempfile("psycopg_patcher", psycopg_test_file)
output, lines = self.launch_subprocess('psycopg_patcher.py')
if lines[0].startswith('Psycopg not monkeypatched'):
print "Can't test psycopg2 patching; it's not installed."
print("Can't test psycopg2 patching; it's not installed.")
return
# if there's anything wrong with the test program it'll have a stack trace
self.assert_(lines[0].startswith('done'), output)

View File

@@ -20,7 +20,7 @@ def assimilate_patched(name):
modobj = __import__(name, globals(), locals(), ['test_main'])
restart_hub()
except ImportError:
print "Not importing %s, it doesn't exist in this installation/version of Python" % name
print("Not importing %s, it doesn't exist in this installation/version of Python" % name)
return
else:
method_name = name + "_test_main"
@@ -33,7 +33,7 @@ def assimilate_patched(name):
globals()[method_name] = test_main
test_main.__name__ = name + '.test_main'
except AttributeError:
print "No test_main for %s, assuming it tests on import" % name
print("No test_main for %s, assuming it tests on import" % name)
import all_modules

View File

@@ -35,7 +35,7 @@ def get_modules():
s.close()
test_modules = test_modules + network_modules
except socket.error as e:
print "Skipping network tests"
print("Skipping network tests")
return test_modules

View File

@@ -4,11 +4,11 @@ from eventlet import patcher
patcher.monkey_patch()
def assimilate_real(name):
print "Assimilating", name
print("Assimilating", name)
try:
modobj = __import__('test.' + name, globals(), locals(), ['test_main'])
except ImportError:
print "Not importing %s, it doesn't exist in this installation/version of Python" % name
print("Not importing %s, it doesn't exist in this installation/version of Python" % name)
return
else:
method_name = name + "_test_main"
@@ -16,7 +16,7 @@ def assimilate_real(name):
globals()[method_name] = modobj.test_main
modobj.test_main.__name__ = name + '.test_main'
except AttributeError:
print "No test_main for %s, assuming it tests on import" % name
print("No test_main for %s, assuming it tests on import" % name)
import all_modules

View File

@@ -64,11 +64,11 @@ def run_and_check(run_client):
w = run_interaction(run_client=run_client)
clear_sys_exc_info()
if w():
print pformat(gc.get_referrers(w()))
print(pformat(gc.get_referrers(w())))
for x in gc.get_referrers(w()):
print pformat(x)
print(pformat(x))
for y in gc.get_referrers(x):
print '-', pformat(y)
print('-', pformat(y))
raise AssertionError('server should be dead by now')

View File

@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import itertools
import random
@@ -111,7 +112,7 @@ class TestTpool(LimitedTestCase):
self.assertEqual(hash(prox1), hash(prox2))
proxList = tpool.Proxy([])
self.assertRaises(TypeError, hash, proxList)
@skip_with_pyevent
def test_wrap_nonzero(self):
prox = tpool.Proxy(re)
@@ -148,7 +149,7 @@ class TestTpool(LimitedTestCase):
for i in prox:
result.append(i)
self.assertEquals(range(10), result)
@skip_with_pyevent
def test_wrap_iterator2(self):
self.reset_timeout(5) # might take a while due to imprecise sleeping
@@ -157,7 +158,7 @@ class TestTpool(LimitedTestCase):
for x in xrange(2):
yield x
time.sleep(0.001)
counter = [0]
def tick():
for i in xrange(20000):
@@ -166,7 +167,7 @@ class TestTpool(LimitedTestCase):
eventlet.sleep(0.0001)
else:
eventlet.sleep()
gt = eventlet.spawn(tick)
previtem = 0
for item in tpool.Proxy(foo()):
@@ -269,13 +270,13 @@ class TestTpool(LimitedTestCase):
x = tpool.Proxy(wrapped, autowrap_names=('__call__',))
for r in x(3):
self.assertEquals(3, r)
@skip_with_pyevent
def test_eventlet_timeout(self):
def raise_timeout():
raise eventlet.Timeout()
self.assertRaises(eventlet.Timeout, tpool.execute, raise_timeout)
@skip_with_pyevent
def test_tpool_set_num_threads(self):
tpool.set_num_threads(5)
@@ -291,7 +292,7 @@ class TpoolLongTests(LimitedTestCase):
assert_(token is not None)
time.sleep(random.random()/200.0)
return token
def sender_loop(loopnum):
obj = tpool.Proxy(Dummy())
count = 100
@@ -310,7 +311,7 @@ class TpoolLongTests(LimitedTestCase):
results = list(pile)
self.assertEquals(len(results), cnt)
tpool.killall()
@skipped
def test_benchmark(self):
""" Benchmark computing the amount of overhead tpool adds to function calls."""
@@ -329,8 +330,8 @@ from eventlet.tpool import execute
best_tpool = min(results)
tpool_overhead = (best_tpool-best_normal)/iterations
print "%s iterations\nTpool overhead is %s seconds per call. Normal: %s; Tpool: %s" % (
iterations, tpool_overhead, best_normal, best_tpool)
print("%s iterations\nTpool overhead is %s seconds per call. Normal: %s; Tpool: %s" % (
iterations, tpool_overhead, best_normal, best_tpool))
tpool.killall()
@skip_with_pyevent

View File

@@ -22,6 +22,7 @@ server / client accept() conn - ExplodingConnectionWrap
V V V
connection makefile() file objects - ExplodingSocketFile <-- these raise
"""
from __future__ import print_function
import eventlet
@@ -52,12 +53,12 @@ class NaughtySocketAcceptWrap(object):
conn_wrap.unwrap()
def arm(self):
print "ca-click"
print("ca-click")
for i in self.conn_reg:
i.arm()
def __call__(self):
print self.__class__.__name__ + ".__call__"
print(self.__class__.__name__ + ".__call__")
conn, addr = self.sock._really_accept()
self.conn_reg.append(ExplodingConnectionWrap(conn))
return conn, addr
@@ -80,12 +81,12 @@ class ExplodingConnectionWrap(object):
del self.conn._really_makefile
def arm(self):
print "tick"
print("tick")
for i in self.file_reg:
i.arm()
def __call__(self, mode='r', bufsize=-1):
print self.__class__.__name__ + ".__call__"
print(self.__class__.__name__ + ".__call__")
# file_obj = self.conn._really_makefile(*args, **kwargs)
file_obj = ExplodingSocketFile(self.conn._sock, mode, bufsize)
self.file_reg.append(file_obj)
@@ -99,24 +100,24 @@ class ExplodingSocketFile(socket._fileobject):
self.armed = False
def arm(self):
print "beep"
print("beep")
self.armed = True
def _fuse(self):
if self.armed:
print "=== ~* BOOM *~ ==="
print("=== ~* BOOM *~ ===")
raise socket.timeout("timed out")
def readline(self, *args, **kwargs):
print self.__class__.__name__ + ".readline"
print(self.__class__.__name__ + ".readline")
self._fuse()
return super(self.__class__, self).readline(*args, **kwargs)
if __name__ == '__main__':
for debug in (False, True):
print "SEPERATOR_SENTINEL"
print "debug set to: %s" % debug
print("SEPERATOR_SENTINEL")
print("debug set to: %s" % debug)
server_sock = eventlet.listen(('localhost', 0))
server_addr = server_sock.getsockname()
@@ -142,7 +143,7 @@ if __name__ == '__main__':
# let the server socket ops catch up, set bomb
eventlet.sleep(0)
print "arming..."
print("arming...")
sock_wrap.arm()
# req #2 - old conn, post-arm - timeout