Fork of code from ScraperWiki at https://classic.scraperwiki.com/scra…
MikeySmith committed Mar 28, 2015
0 parents commit 8fb5f36
Showing 2 changed files with 26 additions and 0 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -0,0 +1,2 @@
# Ignore output of scraper
data.sqlite
24 changes: 24 additions & 0 deletions scraper.py
@@ -0,0 +1,24 @@
import scraperwiki
import simplejson
import urllib2
from scraperwiki import swimport

twitter_handle = 'datahh'

# Fetch the account's follower IDs from the (since-retired) v1 Twitter REST API.
# cursor=-1 requests only the first page of results.
base_url = 'https://api.twitter.com/1/followers/ids.json?cursor=-1&screen_name=' + twitter_handle
results_json = simplejson.loads(scraperwiki.scrape(base_url))
myfollowers = results_json['ids']

# The bulk lookup scraper expects the IDs as strings.
myfollowers_str = map(str, myfollowers)

# Hand the follower IDs to the shared twitter_bulk_users_lookup scraper.
swimport('twitter_bulk_users_lookup').bulklookup(myfollowers_str)


'''
See https://scraperwiki.com/scrapers/twitter_bulk_users_lookup/ for that script's code.
Still to do:
- add a parameter so the lookup can be done by ID or by username (usertype)
'''
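
The to-do note above asks for a usertype parameter so the lookup can run on either numeric IDs or usernames. Below is a minimal sketch of one way that could look, assuming a self-contained helper that queries the v1 users/lookup endpoint directly (presumably what the shared scraper does); the lookup_users name, its signature, and the batching details are illustrative assumptions, not the actual interface of twitter_bulk_users_lookup.

# Hypothetical sketch: lookup_users and its usertype parameter are assumptions,
# not part of the real twitter_bulk_users_lookup scraper.
import simplejson
import scraperwiki

def lookup_users(identifiers, usertype='id'):
    # Map the usertype flag onto the query parameter the v1 users/lookup
    # endpoint expects: user_id for numeric IDs, screen_name for usernames.
    if usertype == 'id':
        param = 'user_id'
    elif usertype == 'username':
        param = 'screen_name'
    else:
        raise ValueError("usertype must be 'id' or 'username'")

    # users/lookup accepts at most 100 comma-separated values per request.
    for start in range(0, len(identifiers), 100):
        batch = identifiers[start:start + 100]
        url = ('https://api.twitter.com/1/users/lookup.json?'
               + param + '=' + ','.join(batch))
        users = simplejson.loads(scraperwiki.scrape(url))
        # Save one row per user, keyed on the numeric Twitter id.
        scraperwiki.sqlite.save(unique_keys=['id'], data=users)

# e.g. lookup_users(myfollowers_str, usertype='id')
# or   lookup_users(['datahh', 'scraperwiki'], usertype='username')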
