This repository has been archived by the owner on Sep 18, 2018. It is now read-only.
forked from ketralnis/reddittweeter
/
reddittweeter.py
executable file
·193 lines (150 loc) · 6.08 KB
/
reddittweeter.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
#!/usr/bin/env python
import sys
import json
import time
import urllib2
from calendar import timegm
from datetime import datetime, timedelta
from itertools import chain
from xml.sax.saxutils import unescape as unescape_html
import tweepy, tweepy.error
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, String, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
# --- runtime configuration -------------------------------------------------
debug = False                  # when True, print skip/tweet progress messages
maxtweets = 10                 # don't tweet more than this in one session
keepfor = timedelta(days=30)   # how long to keep articles in the sqlite
                               # cache to keep them from being tweeted
                               # twice
# urllib2 opener with an explicit User-Agent (presumably to identify the bot
# to reddit's API instead of the default urllib2 agent -- verify)
opener = urllib2.build_opener()
opener.addheaders = [("User-agent", "reddittweeter")]
encoding = 'utf-8'             # byte encoding applied to all tweet text
maxlength = 132                # max message length in bytes; presumably leaves
                               # headroom under twitter's 140-char limit -- confirm
################################################################################
Base = declarative_base()


class Article(Base):
    """A reddit item (link or comment) that has already been tweeted.

    Rows form a de-duplication cache: an item whose id is found here is
    skipped, and rows older than `keepfor` are purged at the end of a run.
    """
    __tablename__ = 'article'

    # reddit "fullname" of the item (the listing's `name` field, e.g. "t3_abc"),
    # used as the primary key
    id = Column(String, primary_key = True)
    # unix-style timestamp (seconds) recorded when the item was tweeted;
    # indexed so the expiry sweep can filter on it
    timestamp = Column(Integer, index=True)

    def __init__(self, id, timestamp):
        self.id = id
        self.timestamp = timestamp
def link_tokens(data):
    """Build the tweet pieces for a reddit link submission (kind t3).

    Returns a (link, tokens) pair: the short redd.it URL for the item, and
    an ordered list of text fragments whose first element is the title and
    whose remainder are optional extras appended as space allows.
    """
    short_url = 'http://redd.it/%s' % data['id']
    fragments = [
        unescape_html(data['title']),
        ' [%s]' % data['subreddit'],
        # ' %d points' % data['score'],
        ' submitted by %s' % data['author'],
        ' [%s]' % data['domain'],
    ]
    return short_url, fragments
def comment_tokens(data):
    """Build the tweet pieces for a reddit comment (kind t1).

    Returns a (link, tokens) pair: a short URL pointing at the comment's
    parent submission (derived from the "tN_id" link_id fullname), plus the
    quoted comment body and an attribution fragment.
    """
    parent_id = data['link_id'].split('_')[1]
    link = 'on http://redd.it/%s' % parent_id
    fragments = [
        '"%s"' % unescape_html(data['body']),
        ', commented by %s' % data['author'],
    ]
    return link, fragments
def tweet_item(entry):
    """Yield (fullname, message) pairs for a parsed reddit JSON entry.

    `entry` is a deserialized reddit API object: a Listing (recursed into),
    a t1 comment, or a t3 link. Messages are encoded to `encoding` bytes and
    trimmed to fit `maxlength`. NSFW (over_18) items are skipped.

    Raises ValueError for API error payloads and unknown kinds.
    """
    if "error" in entry:
        raise ValueError(str(entry))
    kind = entry['kind']
    data = entry['data']
    if kind == 'Listing':
        # a Listing is just a container; recurse into its children
        for child in data['children']:
            for item in tweet_item(child):
                yield item
    else:
        if kind == 't1':
            link, tokens = comment_tokens(data)
        elif kind == 't3':
            link, tokens = link_tokens(data)
        else:
            raise ValueError("Unknown reddit type %r" % kind)
        if data.get('over_18', False):
            # never tweet NSFW items
            return
        message_postfix = (' %s' % link).encode(encoding)
        tokens = [ t.encode(encoding) for t in tokens ]
        title, extras = tokens[0], tokens[1:]
        if len(title) + len(message_postfix) > maxlength:
            # truncate the title so that title + "..." + postfix fits exactly
            # in maxlength (the original negative-slice arithmetic computed
            # the same bound in a roundabout way)
            title = title[:maxlength - len(message_postfix) - 3]
            message = "%s...%s" % (title, message_postfix)
        else:
            # add all of the extra tokens that fit within the length limit
            message = title + message_postfix
            for extra in extras:
                if len(message) + len(extra) < maxlength:
                    message += extra
                else:
                    break
        yield data['name'], message
def main(sourceurl, twitter_consumer, twitter_secret,
         twitter_access_key, twitter_access_secret, dbname):
    """Fetch the reddit JSON listing at `sourceurl` and tweet its new items.

    Already-tweeted items are tracked by fullname in a sqlite database at
    `dbname`; at most `maxtweets` tweets are sent per run, and cache rows
    older than `keepfor` are purged before returning. The four twitter_*
    arguments are the OAuth app and access credentials.
    """
    # open (and, on first run, create) the sqlite de-duplication cache
    engine = create_engine('sqlite:///%s' % dbname, echo = debug)
    Session = sessionmaker(bind=engine)
    session = Session()
    Base.metadata.create_all(engine)

    # authenticate to twitter via OAuth
    auth = tweepy.OAuthHandler(twitter_consumer, twitter_secret)
    auth.set_access_token(twitter_access_key, twitter_access_secret)
    api = tweepy.API(auth)

    # fetch and parse the reddit listing
    text = opener.open(sourceurl).read()
    parsed = json.loads(text)

    # there may be multiple listings, like on a comments-page
    if isinstance(parsed, dict):
        parsed = [parsed]
    assert isinstance(parsed, list)

    numtweets = 0
    for msg_id, message in chain.from_iterable(tweet_item(x) for x in parsed):
        # skip anything we've already tweeted in a previous run
        existing = session.query(Article).filter_by(id = msg_id).first()
        if existing and debug:
            print "Skipping %r" % msg_id
        elif not existing:
            if numtweets > 0:
                # sleep between tweets so as not to hit them too hard
                time.sleep(1)
            if debug:
                print "Tweeting %r: %r" % (msg_id, message)
            try:
                api.update_status(message)
            except tweepy.error.TweepError, e:
                # Ignore stupid t.co stuff that screws up character counting
                if "too long" in e.reason:
                    print >> sys.stderr, "Too long: '%s'" % message
                # selectively ignore duplicate tweet errors
                elif 'duplicate' not in e.reason:
                    # annotate the error with the offending tweet and re-raise
                    e.reason += " (tweet: [%s])" % message
                    raise
                elif debug:
                    print "Warning: ignoring duplicate tweet"
            # record the item as tweeted (even on the ignored error paths
            # above, so it isn't retried forever)
            timestamp = timegm(datetime.now().timetuple())
            session.add(Article(msg_id, timestamp))
            session.commit() # commit after every item so that we
                             # don't tweet the same item twice, even
                             # if we throw an exception later on
            numtweets += 1
            if numtweets >= maxtweets:
                if debug:
                    print "Too many tweets (%d/%d). Quitting early" % (numtweets, maxtweets)
                break

    # clean up old db items to keep it from ballooning in size
    expiry = timegm((datetime.now() - keepfor).timetuple())
    session.query(Article).filter(Article.timestamp < expiry).delete()
    session.commit()
if __name__ == '__main__':
if len(sys.argv) != 3:
print "Usage: reddittweeter CONFIG_FILE URL"
sys.exit(1)
config_filename = sys.argv[1]
sourceurl = sys.argv[2]
import ConfigParser
parser = ConfigParser.RawConfigParser()
with open(config_filename, "r") as f:
parser.readfp(f)
twitter_consumer = parser.get("twitter", "consumer")
twitter_secret = parser.get("twitter", "secret")
twitter_access_key = parser.get("twitter", "access_key")
twitter_access_secret = parser.get("twitter", "access_secret")
dbname = parser.get("storage", "db_path")
main(sourceurl, twitter_consumer, twitter_secret,
twitter_access_key, twitter_access_secret, dbname)