import urllib, re, string, os
from htmlentitydefs import entitydefs

# Pull the translated text out of Babelfish's result page.
babel_regex = re.compile('name="q">([^<]+)</textarea>', re.MULTILINE)

def translate(phrase, lang1, lang2):
    """Translate phrase from lang1 to lang2 via Babelfish."""
    params = urllib.urlencode({'BabelFishFrontPage': 'yes',
                               'doit': 'done',
                               'urltext': phrase,
                               'lp': lang1 + '_' + lang2})
    data = urllib.urlopen('http://babelfish.altavista.com/tr', params).read()
    m = babel_regex.search(data)
    if m: return string.strip(m.group(1))
    else: return 'Cannot translate'

def weather(zip):
    """Scrape weather.com for a short forecast summary for a US ZIP code."""
    forecast = ''
    temp = ''
    wind = ''
    reported = ''
    msg = None
    wurl = urllib.urlopen("http://www.weather.com/weather/local/%s" % (zip))
    for line in wurl.readlines():
        if string.find(line, 'insert forecast text') != -1:
            forecast = string.split(string.split(line, '-->')[1], '</td>')[0]
        elif string.find(line, 'insert current temp') != -1:
            temp = string.split(string.split(line, '-->')[1], '&')[0]
        elif string.find(line, 'insert wind information') != -1:
            wind = string.split(string.split(line, '-->')[1], '&')[0]
        elif string.find(line, 'insert reported by and last updated info') != -1:
            reported = string.strip(string.join(string.split(string.split(line, '-->')[1], '&nbsp;')))
    if forecast != '' and temp != '' and wind != '' and reported != '':
        msg = "Current Forecast: %s. Temp: %sF. Wind: %smph (%s)" % (forecast, temp, wind, reported)
    return msg

# Extract the raw observation string from NOAA's METAR lookup page.
metar_regex = re.compile('The observation is:(<[^>]*>|\s+)*([^<]+)<')

def metar(station):
    """Return the raw METAR observation for a four-letter station code."""
    data = urllib.urlopen("http://weather.noaa.gov/cgi-bin/mgetmetar.pl?cccc=%s" % station).read()
    m = metar_regex.search(data)
    if not m: return
    return string.strip(m.group(2))

# Thumbnail <img> tags on a Google Images result page.
image_regex = re.compile('<img src=(/images\?q=tbn:\S+) width=(\d+) height=(\d+)')

def google_image(phrase, safe=1):
    """Return (url, width, height) tuples for Google Images thumbnails."""
    if safe: imgsafe = 'on'
    else: imgsafe = 'off'
    data = urllib.urlopen('http://images.google.com/images?q=%s&imgsafe=%s' % (urllib.quote_plus(phrase), imgsafe)).read()
    images = []
    while data:
        m = image_regex.search(data)
        if not m: break
        images.append(('http://images.google.com%s' % m.group(1), int(m.group(2)), int(m.group(3))))
        data = data[m.end():]
    return images

def image(url, filename):
    """Download url to a temporary file and convert it to filename with ImageMagick."""
    urllib.urlretrieve(url, 'google.jpg')
    os.spawnlp(os.P_WAIT, 'convert', 'convert', 'google.jpg', filename)
    urllib.urlcleanup()

# Replace named HTML character entities (e.g. &amp;) with their literal characters.
clean_regex = re.compile('&(\w+);')

def clean(text):
    data = clean_regex.split(text)
    for i in range(1, len(data), 2):
        data[i] = entitydefs[data[i]]
    return ''.join(data)

# Result entries and markup tags on a Google (or Google Groups) result page.
google_regex = re.compile('<p><a href=([^>]+)>(.*)</a>(.*)')
tag_regex = re.compile('<[^>]*>')

def google(phrase, groups=0):
    """Return (url, title, text) tuples from a Google or Google Groups search."""
    phrase = urllib.quote_plus(phrase)
    if groups: searchurl = 'http://groups.google.com/groups?q=%s' % phrase
    else: searchurl = 'http://www.google.com/search?q=%s' % phrase
    data = urllib.urlopen(searchurl).read()
    hits = []
    data = google_regex.split(data)
    for i in range(1, len(data), 4):
        url = data[i]
        title = clean(''.join(tag_regex.split(data[i+1])))
        text = clean(''.join(tag_regex.split(data[i+2])))
        hits.append((url, title, text))
    return hits
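
# A minimal usage sketch, not part of the original module: it assumes the
# Babelfish, weather.com, NOAA, and Google endpoints above are still reachable
# (unlikely today), so the calls below only illustrate the intended API.
if __name__ == '__main__':
    print translate('good morning', 'en', 'fr')    # English -> French
    print weather('10001')                         # forecast summary for a US ZIP code
    print metar('KJFK')                            # raw METAR for a station code
    for url, width, height in google_image('kittens')[:3]:
        print url, width, height
    for url, title, text in google('python urllib')[:3]:
        print title, '-', url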