Skip to content

Commit

Permalink
Add !!/report command
Browse files Browse the repository at this point in the history
Privileged users can use it to manually report a question in Charcoal HQ
and Tavern on the Meta, through SmokeDetector.

Syntax:
!!/report <post url>
  • Loading branch information
thomas-daniels committed Jun 9, 2015
1 parent 9fe7799 commit f27292e
Show file tree
Hide file tree
Showing 3 changed files with 61 additions and 3 deletions.
44 changes: 44 additions & 0 deletions apigetpost.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
import requests
from parsing import fetch_post_id_and_site_from_url, url_to_shortlink
import HTMLParser


class PostData:
    """Plain holder for the fields of a post fetched from the SE API.

    Every attribute starts out as None and is filled in by api_get_post.
    """

    def __init__(self):
        # All fields default to None until populated from an API response.
        for attr in ("post_id", "post_url", "post_type", "site",
                     "owner_url", "owner_name", "title"):
            setattr(self, attr, None)


def api_get_post(post_url):
    """Fetch metadata for a question or answer through the Stack Exchange API.

    :param post_url: URL of a question or answer on an SE site.
    :returns: a populated PostData on success, or False when the URL cannot
              be parsed or the API returns no items (e.g. the post has been
              deleted).
    """
    parsed = fetch_post_id_and_site_from_url(post_url)
    if parsed is None:
        # Not a recognizable question/answer URL; previously this crashed
        # with a TypeError when unpacking None.
        return False
    post_id, site, post_type = parsed
    if post_type == "answer":
        api_filter = "!1zSl_EE)(nuF4Xn(2sDLC"
        endpoint = "answers"
    else:
        assert post_type == "question"
        api_filter = "!gB6tXYzgnc3pG)x0n*03eR9*kZWXReH54Qb"
        endpoint = "questions"
    req_url = "https://api.stackexchange.com/2.2/" + endpoint + "/" + post_id +\
        "?site=" + site + "&filter=" + api_filter
    resp_json = requests.get(req_url).json()
    if len(resp_json['items']) == 0:
        return False
    item = resp_json['items'][0]
    post_data = PostData()
    post_data.post_id = post_id
    post_data.post_url = url_to_shortlink(item['link'])
    post_data.post_type = post_type
    h = HTMLParser.HTMLParser()
    # Titles come back HTML-entity-encoded; decode them for display.
    post_data.title = h.unescape(item['title'])
    # Bug fix: the original tested `'owner' is not None`, which compares the
    # string literal to None and is always true. Check the actual value so a
    # post whose owner record is absent/null does not raise.
    if 'owner' in item and item['owner'] is not None:
        post_data.owner_name = item['owner']['display_name']
        post_data.owner_url = item['owner']['link']
    post_data.site = site
    return post_data
16 changes: 15 additions & 1 deletion chatcommunicate.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import random
import requests
import time
from parsing import *
from datahandling import *
Expand All @@ -9,6 +8,8 @@
import re
from datetime import datetime
from utcdate import UtcDate
from apigetpost import api_get_post
from spamhandling import handle_spam

# Please note: If new !!/ commands are added or existing ones are modified, don't forget to
# update the wiki at https://github.com/Charcoal-SE/SmokeDetector/wiki/Commands.
Expand Down Expand Up @@ -230,6 +231,19 @@ def watcher(ev, wrap2):
ev.message.reply("Error: %s" % val)
else:
ev.message.reply("Invalid format. Valid format: `!!/iswlu profileurl` *or* `!!/iswlu userid sitename`.")
if content_lower.startswith("!!/report") \
and is_privileged(ev_room, ev_user_id, wrap2):
if len(message_parts) < 2:
ev.message.reply("Not enough arguments.")
return
url = message_parts[1]
post_data = api_get_post(url)
if post_data is False:
ev.message.reply("Could not find data for this post in the API. Check whether the post is not deleted yet.")
return
handle_spam(post_data.title, post_data.owner_name, post_data.site, post_data.post_url,
post_data.owner_url, post_data.post_id, ["Manually reported " + post_data.post_type],
post_data.post_type == "answer")
if content_lower.startswith("!!/wut"):
ev.message.reply("Whaddya mean, 'wut'? Humans...")
if content_lower.startswith("!!/lick"):
Expand Down
4 changes: 2 additions & 2 deletions parsing.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def fetch_post_id_and_site_from_url(url):
search_regex = r"^https?:\/\/([\w.]+)/questions/\d+/.+/(\d+)#\d+$"
else:
post_type = "question"
search_regex = r"^https?:\/\/([\w.]+)/questions/(\d+)/.+$"
search_regex = r"^https?:\/\/([\w.]+)/questions/(\d+)(?:/.+)?$"
found = regex.compile(search_regex).search(url)
if found is not None:
try:
Expand All @@ -47,7 +47,7 @@ def fetch_post_id_and_site_from_url(url):
return (post_id, post_site, post_type)
except:
return None
search_regex = r"^https?:\/\/([\w.]+)/(q|a)/(\d+)"
search_regex = r"^https?:\/\/([\w.]+)/(q|a)/(\d+)(?:/\d+)?"
found = regex.compile(search_regex).search(url)
if found is None:
return None
Expand Down

0 comments on commit f27292e

Please sign in to comment.