A simple web crawler in Python that crawls web pages and returns the URLs it discovers.
If you have downloaded the source code:
python setup.py install
Create indexes on both MongoDB collections used by the crawler (for fast de-duplication lookups):
use web_snake
db.crawled_urls.createIndex( { "hash" : 1 } )
db.crawled_domains.createIndex( { "domain" : 1 } )
from Queue import Queue
from web_snake.crawler import Crawler
from web_snake.proxies import Proxies
from web_snake.domain_storage import DomainStorage
from web_snake.url_storage import UrlStorage
from web_snake.result_set import ResultSet
crawl_queue = Queue()
crawl_queue.put('http://www.reddit.com/')
result = ResultSet()
proxies = Proxies('../../commondata/proxies.txt')
urls = UrlStorage()
domains = DomainStorage()
crawler = Crawler(crawl_queue=crawl_queue, result=result, domains=domains, urls=urls, max_level=3, proxies=proxies)
crawler.start()
crawler.join()
print "Found {number} links...".format(number=len(result.all())