use a tuple of URLs that do not 404

This commit is contained in:
Thomas Grainger
2013-01-03 18:43:48 +00:00
parent 63b97c0d95
commit 2470941639

View File

@@ -8,19 +8,22 @@ The prints in the body of the fetch function are there to demonstrate that the
requests are truly made in parallel.
"""
# URLs to fetch concurrently. An immutable tuple is used since the set is
# fixed; these particular URLs were chosen because they do not 404
# (the earlier list this replaced contained dead links).
urls = (
    "https://www.google.com/intl/en_ALL/images/logo.gif",
    "http://python.org/images/python-logo.gif",
    "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif",
)
import eventlet
from eventlet.green import urllib2
from eventlet.green import urllib2
def fetch(url):
print "opening", url
body = urllib2.urlopen(url).read()
print "done with", url
return url, body
print "opening", url
body = urllib2.urlopen(url).read()
print "done with", url
return url, body
pool = eventlet.GreenPool(200)
for url, body in pool.imap(fetch, urls):
print "got body from", url, "of length", len(body)
print "got body from", url, "of length", len(body)