From 59cc4955652f4643b8862b1c0d50ea4afc39b18f Mon Sep 17 00:00:00 2001
From: Sergey Shepelev
Date: Fri, 4 Jan 2013 04:26:31 +0400
Subject: [PATCH] examples: webcrawler: urls tuple->list + more style fixes

---
 examples/webcrawler.py | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/examples/webcrawler.py b/examples/webcrawler.py
index 50349be..0ffc8b4 100644
--- a/examples/webcrawler.py
+++ b/examples/webcrawler.py
@@ -1,29 +1,30 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 
 """
-This is a simple web "crawler" that fetches a bunch of urls using a pool to 
+This is a simple web "crawler" that fetches a bunch of urls using a pool to
 control the number of outbound connections. It has as many simultaneously open
 connections as coroutines in the pool.
 
 The prints in the body of the fetch function are there to demonstrate that the
 requests are truly made in parallel.
 """
-
-urls = (
-    "https://www.google.com/intl/en_ALL/images/logo.gif",
-    "http://python.org/images/python-logo.gif",
-    "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif"
-)
-
 import eventlet
 from eventlet.green import urllib2
 
+urls = [
+    "https://www.google.com/intl/en_ALL/images/logo.gif",
+    "http://python.org/images/python-logo.gif",
+    "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif",
+]
+
+
 def fetch(url):
     print "opening", url
     body = urllib2.urlopen(url).read()
     print "done with", url
     return url, body
 
+
 pool = eventlet.GreenPool(200)
 for url, body in pool.imap(fetch, urls):
     print "got body from", url, "of length", len(body)