# TweetCiteBot: converts tweet {{cite web}} citations to {{cite tweet}} on the English Wikipedia.
#!/usr/bin/env python3.6
import mwclient, configparser, mwparserfromhell, re, twitter, textwrap, argparse, sys
from datetime import *
from dateutil.parser import *
from itertools import tee, islice, zip_longest
from twitter.error import TwitterError
#from time import sleep
import time
import wayback
from mwclient import *
def call_home(site):
    """Check the on-wiki kill switch for this bot.

    Reads [[User:TweetCiteBot/status]] and returns False when the page
    contains "false" (case-insensitive) anywhere in its text, i.e. an
    operator has disabled the bot; otherwise returns True.

    @param site mwclient Site instance (must expose site.Pages)
    @returns bool True when the bot is allowed to keep running
    """
    page = site.Pages['User:TweetCiteBot/status']
    text = page.text()
    # Substring match is deliberate: any "false" on the status page stops the bot.
    if "false" in text.lower():
        return False
    return True
def allow_bots(text, user):
user = user.lower().strip()
text = mwparserfromhell.parse(text)
for tl in text.filter_templates():
if in ('bots', 'nobots'):
return True
for param in tl.params:
bots = [x.lower().strip() for x in param.value.split(",")]
if == 'allow':
if ''.join(bots) == 'none': return False
for bot in bots:
if bot in (user, 'all'):
return True
elif == 'deny':
if ''.join(bots) == 'none': return True
for bot in bots:
if bot in (user, 'all'):
return False
return True
def get_next_iter_item(some_iterable, window=1):
    """Pair each item with the item `window` steps ahead (None past the end).

    Useful for finding {{dead link}} templates that follow a citation.
    Adapted from a StackOverflow answer by user nosklo.
    (The scraped source had lost the docstring's triple quotes, making
    this block a syntax error.)

    @param some_iterable Thing to iterate over
    @param window How far to look ahead (default 1)
    @returns iterator of (item, lookahead_item_or_None) tuples
    """
    items, nexts = tee(some_iterable, 2)
    nexts = islice(nexts, window, None)
    return zip_longest(items, nexts)
def save_edit(page, utils, text):#config, api, site, original_text,dry_run):#,config):
#utils = [config,api,site,archive_urls,dry_run]
config = utils[0]
api = utils[1]
site = utils[2]
dry_run = utils[4]
archive_urls = utils[3]
original_text = text
code = mwparserfromhell.parse(text)
for template in code.filter_templates():
if"nobots") or"Wikipedia:Exclusion compliant"):
if template.has("allow"):
if "TweetCiteBot" in template.get("allow").value:
break # can edit
print("\n\nPage editing blocked as template preventing edit is present.\n\n")
if not call_home(site):#config):
raise ValueError("Kill switch on-wiki is false. Terminating program.")
edit_summary = """Converted Tweet
URLs to [[Template:Cite tweet|{{cite tweet}}]] using
[[User:""" + config.get('enwiki','username') + "| " + config.get('enwiki','username') + "]]-PyEdition. Mistake? [[User talk:TheSandDoctor|msg TSD!]] (please mention that this is the PyEdition!)"""
time = 0
while True:
#content_changed = False
#text = page.edit()
#text = text.replace('[[Category:Apples]]', '[[Category:Pears]]')
if time == 0:
text = page.text()
if time == 1:
# page = site.Pages[page.page_title]
original_text = site.Pages[page.page_title].text()
content_changed, text = convert(original_text,dry_run, api, archive_urls)
if dry_run:
print("Dry run")
#Write out the initial input
text_file = open("Input02.txt", "w")
#Write out the output
if content_changed:
text_file = open("Output02.txt", "w")
print("Content not changed, don't print output")
if verbose:
print("LIVE run")
#print("Would have saved here")
#TODO: Enable
if not content_changed:
if verbose:
print("Content not changed, don't bother pushing edit to server")
#break, summary=edit_summary, bot=True, minor=True)
print("Saved page")
if time == 1:
time = 0
except [[EditError]]:
time = 1
time.sleep(5) # sleep for 5 seconds, giving server some time before querying again
except [[ProtectedPageError]] as e:
print('Could not edit ' + page.page_title + ' due to protection')
def convert(text,dry_run, api,archive_urls):
Converts use of {{cite web}} for tweets (if present) to using {{cite tweet}}.
@param text Page text to go over
@param dry_run boolean Whether or not this is a dry run (dry run = no live edit)
@param api Twitter API instance
@returns [content_changed, content] Whether content was changed,
(if former true, modified) content.
# print("In remove {}".format(dry_run))
wikicode = mwparserfromhell.parse(text)
templates = wikicode.filter_templates()
content_changed = False
#TODO: Testing (dry run) only
if dry_run:
text_file = open("Input.txt","w")
#TODO: End dry run only
code = mwparserfromhell.parse(text)
dead_link = False
use_mdy = False
for template, next_template in get_next_iter_item(code.filter_templates()):#Tracklist, Track, Soundtrack, Tlist, Track list =
#if"dead link"):
# dead_link = True
if ("use mdy dates") or"mdy")
or"use mdy") or"usemdy")
if verbose:
print("Use MDY format")
use_mdy = True
# else:
# Clearly the date format template isn't there, so just assume other format should be used
# by default
# print("Use DMY format")
if ("cite web") or"citeweb")
or"c web") or"cita web")
or"weblink") or"ref web")
or"citweb") or"cw")
or"web cite") or"web citation")
or"cite w") or"cit web")
or"cite url") or"cite blog")
or"cite web.") or"cite webpage")
or"web reference") or"web-reference")
or"cite wb") or"cite we")
or"citat web") or"cite-web")
or"webbref") or"cite website")
or"cite website article") or"chú thích web")
or"citace elektronické monografie") or"citeer web")
or"یادکرد وب") or"웹 인용")
or"cite web/lua")):
if template.has("url"):
url = template.get("url").value
match = re.match(r'(?:(?:\s)*https?:\/\/)?(?:www\.)?(?:\s)*?twitter\.com\/(?:#!\/)?@?([^\/\?\s]*)\/status\/([{\d+:\d+]+)',str(url))
if match: # it is a twitter URL
if next_template:
if"dead link"): #TODO: Expand to cover variations/aliases of {{dead link}}
# Play it safe and leave this template as the next one
# shouldn't be a deadlink (if it is, doing all this for nothing)
tweet = api.GetStatus(
if tweet:
has_archive_url = False
content_changed = True
if verbose:
#url_reg = r'[a-z]*[:.]+\S+'
url_reg = r'((?:\s)*https?:\/\/)?(?:www\.)?(?:\s)*?t\.co\/([a-zA-Z0-9])*'
sec_pattern = r'/\r|\n/'
text = re.sub(url_reg, '', tweet.text)
text = re.sub(sec_pattern, ' ', text)
tweet_text = textwrap.shorten(text,width=40,placeholder="...")
if verbose:
tweet_obj = "{{cite tweet|number=" + str( + "|user=" + tweet.user.screen_name + "|title=" + tweet_text
tweet_accessdate = tweet_archivedate = tweet_language = tweet_archiveurl = tweet_date = None
if template.has("accessdate") or template.has("access-date"):
#tweet_accessdate = template.get("accessdate").value
tweet_obj += "|accessdate=" + str(template.get("accessdate").value)
if verbose:
print("Has accessdate")
if template.has("archivedate") or template.has("archive-date"):
if verbose:
print("Has archive date")
# tweet_archivedate = template.get("archivedate").value
tweet_obj += "|archivedate=" + str(template.get("archivedate").value)
if template.has("language"):
#tweet_language = template.get("language").value
tweet_obj += "|language=" + str(template.get("language").value)
if verbose:
print("Has language")
if template.has("archiveurl"):
has_archive_url = True
#tweet_archiveurl = template.get("archiveurl").value
tweet_obj += "|archiveurl=" + str(template.get("archiveurl").value)
if verbose:
print("Has archiveurl")
if template.has("date"):
#tweet_date = template.get("date").value
tweet_obj += "|date=" + str(template.get("date").value)
if verbose:
print("Has date")
#For reference:
date_format = '%-d %B %Y'
if use_mdy:
date_format = '%B %-d, %Y'
tweet_obj += "|date=" + parse(tweet.created_at).strftime(date_format)
#tweet_obj += "}}"
if not has_archive_url and archive_urls:
wb = wayback.Wayback()
archive_url = wb.closest(str(url))
if archive_url:
tweet_obj += "|archive-url=" + archive_url
tweet_obj += "|archivedate=" +'%B %Y')
# code.replace(template, str(template) + "{{dead link|date=" +
# + "|url=" + archive_url + "|bot=TweetCiteBot}}")
# code.replace(template, str(template) + "{{dead link|date=" +
# + "|fix-attempted=yes" + "|bot=TweetCiteBot}}")
tweet_obj += "}}"
code.replace(template, tweet_obj)
content_changed = True
except TwitterError as err:
#TODO: Somewhere here we should try to look to archive,
# since Tweet clearly doesn't exist.
# TODO: Figure out wayback in python
print("Clearly something went wrong with tweet " + str(err))
wb = wayback.Wayback()
archive_url = wb.closest(str(url))
date_format = '%B %Y'
if archive_url:
code.replace(template, str(template) + "{{dead link|date=" +
+ "|url=" + archive_url + "|bot=TweetCiteBot}}")
content_changed = True
code.replace(template, str(template) + "{{dead link|date=" +
+ "|fix-attempted=yes" + "|bot=TweetCiteBot}}")
content_changed = True
return [content_changed, str(code)] # get back text to save
def getList():
f = open("list of all articles containing links to tweets (unmarked up).txt", 'r')
lst ='\n')
articles = []
for l in lst:
if not l is "":
return articles
def main():
limited_run = True
pages_to_run = 4727
offset = 4721
category = None
archive_urls = False
dry_run = False
verbose = False
parser = argparse.ArgumentParser(prog='TweetCiteBot Tweet URL conversion', description='''Reads {{cite web}} templates
on articles looking for url parameters containing Tweet URLs. If found, convert template to {{cite tweet}} and retrieve
relevant information (if possible). If the Tweet is a dead link, attempt recovery with the Wayback archive and tag accordingly
on-wiki. This task was approved by the English Wikipedia Bot Approvals Group at 17:59, 2 December 2017 (UTC) by BAG admin
parser.add_argument("-dr", "--dryrun", help="perform a dry run (don't actually edit)",
parser.add_argument("-arch","--archive", help="actively archive Tweet links (even if still live links)",
parser.add_argument("-v","--verbose", help="Display more information when running",
args = parser.parse_args()
if args.dryrun:
dry_run = True
print("Dry run")
if args.archive:
print("Archive allow")
archive_urls = True
if args.verbose:
print("Verbose mode")
verbose = True
#raise ValueError("for testing, dont want whole script running")
site = mwclient.Site(('https',''), '/w/')
#site = mwclient.Site(('https','','/w/'))
config = configparser.RawConfigParser()'credentials.txt')
site.login(config.get('enwiki','username'), config.get('enwiki', 'password'))
except errors.LoginError as e:
raise ValueError("Login failed.")
api = twitter.Api(consumer_key='CUST_KEY',
counter = 0
#for page in site.Categories[category]:
#page = site.Pages['User:TweetCiteBot/sandbox']#"If You Ever Think I Will Stop Goin' In Ask Double R"]#'3 (Bo Bice album)']
# print("Working with: " +
#page = site.Pages['User:TweetCiteBot/sandbox']#'3 (Bo Bice album)']
# if limited_run:
# if counter < pages_to_run:
# counter += 1
# else:
# return # run out of pages in limited run
utils = [config,api,site,archive_urls,dry_run]
list = getList()
#raise ValueError("yoyo")
#page = site.Pages["Rhode Island Rams women's ice hockey"]
# text = page.text()
# try:
# save_edit(page, utils, text)#config, api, site, text, dry_run)#, config)
#time.sleep(0.5) # sleep 1/2 second in between pages
# except ValueError as err:
# print(err)
if limited_run:
while counter < pages_to_run:
if offset > 0:
offset -= 1
if verbose:
print("Skipped due to offset config")
counter += 1
print("Working with: " + list[counter])
page = site.Pages[list[counter]]
text = page.text()
save_edit(page, utils, text)#config, api, site, text, dry_run)#, config)
#time.sleep(0.5) # sleep 1/2 second in between pages
except ValueError as err:
counter += 1
# return # run out of pages in limited run
#for art in getList():
# if offset > 0:
# offset -= 1
# print("Skipped due to offset config")
# continue
# print("Working with: " + art)
# page = site.Pages[art]
# if limited_run:
# if counter < pages_to_run:
# counter += 1
# text = page.text()
# try:
# save_edit(page, utils, text)#config, api, site, text, dry_run)#, config)
# #time.sleep(0.5) # sleep 1/2 second in between pages
# except ValueError as err:
# print(err)
# else:
# return # run out of pages in limited run
#text = page.text()
# try:
# save_edit(page, utils, text)#config, api, site, text, dry_run)#, config)
# except ValueError as err:
# print(err)
#sleep(5) # sleep 5 seconds in between pages
if __name__ == "__main__":
    # Default for the module-level flag read by convert()/save_edit();
    # main() enables it when -v/--verbose is passed.
    verbose = False
    try:
        main()
    except KeyboardInterrupt:
        # Operator pressed Ctrl-C: exit cleanly instead of dumping a traceback.
        sys.exit(0)