diff --git a/examples/webcrawler.py b/examples/webcrawler.py
index a4f5e2a..50349be 100644
--- a/examples/webcrawler.py
+++ b/examples/webcrawler.py
@@ -8,19 +8,22 @@
 The prints in the body of the fetch function are there to demonstrate
 that the requests are truly made in parallel.
 """
-urls = ["http://www.google.com/intl/en_ALL/images/logo.gif",
-        "https://wiki.secondlife.com/w/images/secondlife.jpg",
-        "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif"]
+urls = (
+    "https://www.google.com/intl/en_ALL/images/logo.gif",
+    "http://python.org/images/python-logo.gif",
+    "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif"
+)
 
 import eventlet
-from eventlet.green import urllib2
+from eventlet.green import urllib2
+
 
 def fetch(url):
-    print "opening", url
-    body = urllib2.urlopen(url).read()
-    print "done with", url
-    return url, body
+    print "opening", url
+    body = urllib2.urlopen(url).read()
+    print "done with", url
+    return url, body
 
 pool = eventlet.GreenPool(200)
 for url, body in pool.imap(fetch, urls):
-    print "got body from", url, "of length", len(body)
\ No newline at end of file
+    print "got body from", url, "of length", len(body)