
Scraping the rest of the individual tweets. Started work on importing to database.
1 parent 44966f6 commit 21bae8343f42c78c9ad753c712925e5c058fb304 @schneidy committed Mar 6, 2012
Showing with 21 additions and 3 deletions.
+21 −3 scrape_tweets.py
@@ -1,9 +1,27 @@
from twython import Twython
-from oauth import twitter_token, twitter_secret, oauth_token, oauth_token_secret
+from settings import *
+from datetime import datetime
+import dateutil.parser as parser
+import MySQLdb as mdb
+import sys
twitter = Twython(twitter_token, twitter_secret, oauth_token, oauth_token_secret)
-search_results = twitter.searchTwitter(q="Super Tuesday", rpp="100")
+query_subjects = ["Super Tuesday", "Romney", "Santorum", "Ron Paul", "Gingrich", "Obama"]
+
+
+search_results = twitter.searchTwitter(q="Super Tuesday", rpp="10", result_type="current", page="1")
for tweet in search_results["results"]:
- print tweet['text'].encode('utf-8'),"\n"
+ tweet_id = tweet['id_str']
+ user = tweet['from_user'].encode('utf-8')
+ user_id = tweet['from_user_id_str']
+ created_at = (parser.parse(tweet['created_at']))
+ text = tweet['text'].encode('utf-8')
+ geo = tweet['geo']
+ coordinates = geo['coordinates'] if geo != None else None
+ scraped_at = datetime.now()
+ source = tweet['source']
+
+def insertToDB(self, table_name):
+ con = mdb.connect(host, user, password, db)
