
Fork of code from ScraperWiki at https://classic.scraperwiki.com/scra…

tesnik committed May 20, 2015
0 parents commit 0e2553c57b0dc9751d5a31a832fd278911c6641b
Showing with 24 additions and 0 deletions.
  1. +2 −0 .gitignore
  2. +22 −0 scraper.py
@@ -0,0 +1,2 @@
# Ignore output of scraper
data.sqlite
@@ -0,0 +1,22 @@
import urllib2
from xml.dom.minidom import parseString

def get_google_new_results(term, count):
    results = []
    # Fetch the Google News RSS feed for the search term and parse the XML
    obj = parseString(urllib2.urlopen('http://news.google.com/news?q=%s&output=rss' % term).read())
    # Skip the first two title/link elements, which describe the feed itself rather than news items
    elements = obj.getElementsByTagName('title')[2:]
    links = obj.getElementsByTagName('link')[2:]
    # Pair each headline with its corresponding link, up to `count` results
    for element, link in zip(elements, links)[:count]:
        headline = element.childNodes[0].data
        url = link.childNodes[0].data.split('=')[-1]
        results.append(headline + ' -> ' + url)
    return results

items = get_google_new_results('apple', 2)
for i, e in enumerate(items):
    print '%d: %s' % (i + 1, e)
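
The .gitignore entry above excludes data.sqlite, the conventional output file of a morph.io scraper, but this first commit only prints results to stdout. A minimal sketch of how the results could be persisted with the scraperwiki library (assumed to be available, as it is on morph.io); the table layout and the use of the headline as the unique key are illustrative assumptions, not part of the commit:

import scraperwiki

# Hypothetical persistence step: store each headline/URL pair in data.sqlite.
# Using the headline as the unique key is an assumption for this sketch.
for item in get_google_new_results('apple', 2):
    headline, url = item.split(' -> ', 1)
    scraperwiki.sqlite.save(unique_keys=['headline'],
                            data={'headline': headline, 'url': url})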
