Skip to content

Commit

Permalink
PEP8 and Pyflakes fixes; also added both to the travis run for tests and…
Browse files Browse the repository at this point in the history
… source
  • Loading branch information
lepinkainen committed Dec 28, 2015
1 parent f067f93 commit 1f80bb7
Show file tree
Hide file tree
Showing 13 changed files with 153 additions and 139 deletions.
6 changes: 3 additions & 3 deletions .travis.yml
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
language: python
language: python
sudo: false
python:
- "2.7"
# command to install dependencies, e.g. pip install -r requirements.txt --use-mirrors
install:
- pip install --user --upgrade pip setuptools
- pip install --user -r requirements.txt
- pip install --user pytest pytest-pep8 nose # we still need nose for the _eq functions for now
- pip install --user pytest pytest-pep8 pytest-flakes nose # we still need nose for the _eq functions for now
# command to run tests, e.g. python setup.py test
before_script: export PYTHONPATH=$PYTHONPATH:$(pwd)
script: py.test --pep8
script: py.test --pep8 --flakes
notifications:
irc:
channels:
Expand Down
4 changes: 2 additions & 2 deletions pyfibot/colorlogger.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
BOLD_SEQ = "\033[1m"


def formatter_message(message, use_color = True):
def formatter_message(message, use_color=True):
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
Expand All @@ -30,7 +30,7 @@ def formatter_message(message, use_color = True):


class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color = True):
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color

Expand Down
10 changes: 5 additions & 5 deletions pyfibot/modules/available/module_ask.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ def getSTARTReply(q):
log.debug("fails: %s" % fail_tags)
return "Failed to parse data. :/"
else: # Let's return the fail tag then.
s = "".join([tag for tag in fail_tags[0](text=True) if type(tag) != Comment and re.search("Accept|Abort", tag) == None])
s = "".join([tag for tag in fail_tags[0](text=True) if type(tag) != Comment and re.search("Accept|Abort", tag) is None])
s = re.sub("<.*?>", "", s) # Remove possibly remaining HTML tags (like BASE) that aren't parsed by bs
s = re.sub("\n|\r|\t|&nbsp;", " ", s).strip(' \t') # One-line it.
s = re.sub("[ ]{2,}", " ", s) # Compress multiple spaces into one
Expand All @@ -85,13 +85,13 @@ def getSTARTReply(q):
for answer in data_tags:

# Cleanups on html depth
[sup.replaceWith(("^%s" % sup.string) if sup.string != None else " ") for sup in answer.findAll('sup')] # Handle <SUP> tags
[sup.replaceWith(("^%s" % sup.string) if sup.string is not None else " ") for sup in answer.findAll('sup')] # Handle <SUP> tags
[br.replaceWith(" ") for br in answer.findAll('br')] # Handle <BR> tags
[td.extract() for td in answer.findAll('td') if len("".join(td.findAll(text=True))) < 10] # Handle <TABLE> data
[cm.extract() for cm in answer.findAll(text=lambda text:isinstance(text, Comment))] # Handle <!-- Comments -->

# Find media by looking for tags like img and script and words like doctype, map, click (It sometimes embeds a whole HTML-document to the results. :S)
if len(answer.findAll({"img": True, "script": True})) > 0 or medias.search("".join(answer(text=True))) != None:
if len(answer.findAll({"img": True, "script": True})) > 0 or medias.search("".join(answer(text=True))) is not None:
media = True
# Cleanups on string depth
s = "".join(answer(text=True))
Expand All @@ -106,7 +106,7 @@ def getSTARTReply(q):
try:
answer = min((ans for ans in answers if len(ans) > 10 and not medias.search(ans)), key=len)
except:
if media == False:
if media is False:
return "Sorry, I don't know"
else:
return "Take a look at %s :P" % shorturl(url).encode("utf-8")
Expand All @@ -125,7 +125,7 @@ def getSTARTReply(q):
answer = "%s &ndash; See %s for more." % (answer, shorturl(url))

# It's not too long, but additional media is available, so let's give a link. :)
elif media == True:
elif media is True:
answer = "%s &ndash; See %s for media." % (answer, shorturl(url))
return unicode(unescape(answer)).encode('utf-8')

Expand Down
9 changes: 5 additions & 4 deletions pyfibot/modules/available/module_forecast.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,10 +28,11 @@ def command_forecast(bot, user, channel, args):
return

def format_day(day):
return (u'%s: %s (%.0f°C/%.0f°C)' % (day['day_of_week'],
day['condition'],
fahrenheit_to_celcius(day['low']),
fahrenheit_to_celcius(day['high'])))
return (u'%s: %s (%.0f°C/%.0f°C)' %
(day['day_of_week'],
day['condition'],
fahrenheit_to_celcius(day['low']),
fahrenheit_to_celcius(day['high'])))

answerstr = u'%s: ' % (result_dict['forecast_information']['city'])
answerstr += u", ".join(format_day(day) for day in result_dict['forecasts'])
Expand Down
2 changes: 1 addition & 1 deletion pyfibot/modules/available/module_imgur.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def upload_gallery(url):

urls = set()

for link in br.links(url_regex = ".jpg$"):
for link in br.links(url_regex=".jpg$"):
urls.add(link.url)

upload_images(urls)
Expand Down
17 changes: 6 additions & 11 deletions pyfibot/modules/module_openweather.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@

appid = None


def init(bot):
global appid
global default_location
Expand Down Expand Up @@ -44,17 +45,11 @@ def command_weather(bot, user, channel, args):
url = 'http://api.openweathermap.org/data/2.5/weather?q=%s&units=metric&appid=%s'
r = bot.get_url(url % (location, appid))

print(r)

data = r.json();

print(data)

# try:
# data = r.json()
# except:
# log.debug("Couldn't parse JSON.")
# return bot.say(channel, 'Error: API error, unable to parse JSON response.')
try:
data = r.json()
except:
log.debug("Couldn't parse JSON.")
return bot.say(channel, 'Error: API error, unable to parse JSON response.')

if 'cod' not in data or int(data['cod']) != 200:
log.debug('status != 200')
Expand Down
23 changes: 15 additions & 8 deletions pyfibot/modules/module_urltitle.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,9 +140,11 @@ def __escaped_fragment(url, meta=False):
return url.geturl()

query = url.query
if query: query += '&'
if query:
query += '&'
query += '_escaped_fragment_='
if url.fragment: query += url.fragment[1:]
if url.fragment:
query += url.fragment[1:]

return urlparse.urlunsplit((url.scheme, url.netloc, url.path, query, ''))

Expand All @@ -158,6 +160,7 @@ def command_cache(bot, user, channel, args):
msg = 'Cache status: %s' % ('ENABLED' if CACHE_ENABLED else 'DISABLED')
bot.say(channel, msg)


def handle_url(bot, user, channel, url, msg):
"""Handle urls"""

Expand Down Expand Up @@ -401,7 +404,8 @@ def _handle_tweet(url):
"""http*://twitter.com/*/statuses/*"""
tweet_url = "https://api.twitter.com/1.1/statuses/show.json?id=%s&include_entities=false"
test = re.match("https?://.*?twitter\.com\/(\w+)/status(es)?/(\d+)", url)
if not test: return
if not test:
return
# matches for unique tweet id string
infourl = tweet_url % test.group(3)

Expand Down Expand Up @@ -474,7 +478,8 @@ def _handle_youtube_gdata(url):
return

items = r.json()['items']
if len(items) == 0: return
if len(items) == 0:
return

entry = items[0]

Expand Down Expand Up @@ -1269,7 +1274,6 @@ def _handle_hitbox(url):
return False



def _handle_google_play_music(url):
"""http*://play.google.com/music/*"""
bs = __get_bs(url)
Expand All @@ -1292,14 +1296,14 @@ def _handle_steamstore(url):
# https://wiki.teamfortress.com/wiki/User:RJackson/StorefrontAPI
api_url = "http://store.steampowered.com/api/appdetails/"
app = re.match("http://store\.steampowered\.com\/app/(?P<id>\d+)", url)
params = { 'appids': app.group('id'), 'cc': 'fi' }
params = {'appids': app.group('id'), 'cc': 'fi'}

r = bot.get_url(api_url, params=params)
data = r.json()[app.group('id')]['data']

name = data['name']
if 'price_overview' in data:
price = "%.2fe" % (float(data['price_overview']['final'])/100)
price = "%.2fe" % (float(data['price_overview']['final']) / 100)

if data['price_overview']['discount_percent'] != 0:
price += " (-%s%%)" % data['price_overview']['discount_percent']
Expand Down Expand Up @@ -1389,13 +1393,15 @@ def _handle_gitio(url):
"""http*://git.io*"""
return __get_title_tag(url)


def _handle_gfycat(url):
"""http*://*gfycat.com/*"""

api_url = "https://gfycat.com/cajax/get/%s"

m = re.match("https?://(?:\w+\.)?gfycat.com/([\w]+)(?:\.gif|\.webm|\.mp4)?", url)
if not m: return
if not m:
return

r = bot.get_url(api_url % m.group(1))
j = r.json()['gfyItem']
Expand Down Expand Up @@ -1444,6 +1450,7 @@ def _handle_ubuntupaste(url):
"""http*://paste.ubuntu.com/*"""
return False


def _handle_poliisi(url):
"""http*://*poliisi.fi/*/tiedotteet/*"""
return False
3 changes: 2 additions & 1 deletion pyfibot/modules/module_webchat.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,8 @@ def command_webchat(bot, user, channel, args):

def webchat_getorigin(hexip):
"""Parse webchat hex-format ip to decimal ip and hostname if it exists"""
if len(hexip) != 8: return
if len(hexip) != 8:
return

ip = []
for i in range(2, len(hexip) + 2, 2):
Expand Down
4 changes: 2 additions & 2 deletions pyfibot/modules/module_wolfram_alpha.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,15 +25,15 @@ def init(bot):
def clean_question(_string):
if _string:
res = re.sub("[ ]{2,}", " ",
_string.replace(' | ', ' ').replace('\n', ' ').replace('~~', ' ≈ ')).strip()
_string.replace(' | ', ' ').replace('\n', ' ').replace('~~', ' ≈ ')).strip()
res = res.replace("\:0e3f", u'฿')
return res


def clean_answer(_string):
if _string:
res = re.sub("[ ]{2,}", " ",
_string.replace(' | ', ': ').replace('\n', ' | ').replace('~~', ' ≈ ')).strip()
_string.replace(' | ', ': ').replace('\n', ' | ').replace('~~', ' ≈ ')).strip()
res = res.replace("\:0e3f", u'฿')
return res

Expand Down

0 comments on commit 1f80bb7

Please sign in to comment.