#!/usr/bin/env python3.1
# -*- coding: UTF-8 -*-
# Wed Aug 31, 02:46 sunuslee
# sunuslee (at) gmail(dot)com
# This is my first Python program, together with my first CGI page and my first Apache server.
# You may do anything you want with these files under ONLY ONE condition:
# Please do keep the lines above.
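#
# Overview: this script reads a job line from ./wait_queue (two douban.com user
# ids and nicknames), loads or downloads each user's movie, music and book
# collections through the local cache and dl helper modules, and writes an HTML
# page listing the items both users like.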
import threading
import queue
import sys
import codecs
import urllib.request
import urllib.parse
import urllib.error
import time
import random
import os
import pickle
import cache   # local helper module: load/save cached collection data
import dl      # local helper module: download a user's collection pages
CSS = True
IS_LOCAL = True
ROOTDIR = "/home/sunus/apache/" if IS_LOCAL else "/usr/local/apache2/"
LINK_DB_PEOPLE = "http://www.douban.com/people/"
people_nr = 5
people_list = []    # [user_id, nickname, icon_url] for each of the two users
table_movie = []    # per-user [user, item_dict] pairs, one table per category
table_music = []
table_book = []
cat_chs = {'movie': '电影', 'music': '音乐', 'book': '书籍'}   # Chinese labels: 电影=movie, 音乐=music, 书籍=book
fh_write_html = None   # handle of the result HTML page, opened in main()
class Worker(threading.Thread):
    # Daemon worker: repeatedly takes a (uid, cat, start, queue) job from the
    # work queue and hands it to dl.download_t().
    def __init__(self, work_queue):
        super().__init__()
        self.work_queue = work_queue

    def run(self):
        while True:
            try:
                uid, cat, start, que = self.work_queue.get()
                tname = self.name
                self.process(uid, cat, start, que, tname)
            finally:
                # Mark the job done even if the download raised, so join() can return.
                self.work_queue.task_done()

    def process(self, uid, cat, start, que, tname):
        dl.download_t(uid, cat, start, que, tname)
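# Load one user's cached collection for a category and append [user, item_dict]
# to the matching table_* list. cache.cache_load() is assumed to return a dict
# keyed by item name (that is how get_match_rate() below consumes it).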
def get_user_collection(user, cat):
global table_movie
global table_music
global table_book
if cat == 'movie':
table = table_movie
elif cat == 'music':
table = table_music
else:
table = table_book
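    # Cache files live under htdocs/cache_datas/ and are named cache_<uid>_<category>.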
file_path = ROOTDIR + r'htdocs/cache_datas/' + 'cache_' + user[0] + '_' + cat
item_dict = cache.cache_load(file_path)
table.append([user, item_dict])
def get_user_icon(uid):
    # Scrape the user's profile page and return the first icon URL found.
    fh = urllib.request.urlopen('http://www.douban.com/people/' + uid)
    cont = fh.read().decode('utf8')
    for line in cont.splitlines():
        if 'douban.com/icon' in line:
            icon = line.split('"')[1]
            return icon
    # No icon found: fall back to the empty placeholder image.
    return '/imgs/empty.png'
IMG_PATH = "/imgs/"
# This function returns the HTML code for n light stars followed by (maxstar - n)
# dark stars, all wrapped in a link to the item with the summary as its tooltip.
def print_stars(n, maxstar, summary='', item_url=''):
    # Treat -1 as "no rating" and '-1' as "no summary" in the cached data.
    if n == -1:
        n = 0
    if summary == '-1':
        summary = ''
    n_dark = maxstar - n
    left = '<a href="{0}" title="{1}">'.format(item_url, summary)
    s_light = '<img src="{0}sg.png" />'.format(IMG_PATH)
    s_dark = '<img src="{0}sw.png" />'.format(IMG_PATH)
    html_code = left + s_light * n + s_dark * n_dark
    # A different info icon is used when there is no summary text.
    if summary != '':
        html_code += '<img src="{0}info.png" /></a>'.format(IMG_PATH)
    else:
        html_code += '<img src="{0}info1.png" /></a>'.format(IMG_PATH)
    return html_code
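# Example (hypothetical values): print_stars(3, 5, 'great', 'http://movie.douban.com/subject/123/')
# returns an <a> tag wrapping three light-star images, two dark-star images and an info icon.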
# This function writes the table of items that uid1 and uid2 have in common for
# one category and returns the number of common items; uid1_idx and uid2_idx are
# the users' indexes in the corresponding table_* list.
def get_match_rate(uid1_idx, uid2_idx, cat):
global fh_write_html
global table_movie
global table_music
global table_book
if cat == 'movie':
table = table_movie
elif cat == 'music':
table = table_music
else:
table = table_book
cat_local = cat_chs[cat]
u1_set = set()
u2_set = set()
common_set = set()
try:
for item_name in table[uid1_idx][1].keys():
u1_set.add(item_name)
for item_name in table[uid2_idx][1].keys():
u2_set.add(item_name)
    except IndexError:
        # One of the tables has no entry for this index (collection missing).
        fh_write_html.write("<h4>Problem: missing data, uid1_idx = {0}, uid2_idx = {1}</h4>\n".format(uid1_idx, uid2_idx))
        return None
common_set = u1_set & u2_set
nickname1 = table[uid1_idx][0][1]
nickname2 = table[uid2_idx][0][1]
icon1 = table[uid1_idx][0][2]
icon2 = table[uid2_idx][0][2]
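    # The heading written below reads "<category> that <nickname1> and <nickname2> both like (<count>):".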
fh_write_html.write('<h4><a href="{0}{1}">{2}</a>\
\
<a href="{0}{3}">{4}</a>\
都喜欢的{5}有({6}):</h4>\n'.format(LINK_DB_PEOPLE, table[uid1_idx][0][0], nickname1,
table[uid2_idx][0][0], nickname2,
cat_local, len(common_set),))
if len(common_set) != 0:
fh_write_html.write('<table border="1" width="600px">\n')
fh_write_html.write('<tr>\
<th align="left" width="300px"><h4>{1}</h4></th>\
<th align="left" width="300px"><h4><a href="{0}{2}">{3}<img src="{4}" /></a></h4></th>\
<th align="left" width="300px"><h4><a href="{0}{5}">{6}<img src="{7}" /></a></h4></th></tr>\n'.format(LINK_DB_PEOPLE, "match",
table[uid1_idx][0][0], nickname1, icon1,
table[uid2_idx][0][0], nickname2, icon2))
link_db_item = "http://{0}.douban.com/subject/".format(cat)
for item_name in common_set:
name = table[uid1_idx][1][item_name][2] if table[uid1_idx][1][item_name][2] != '-1' else item_name
item_url = table[uid1_idx][1][item_name][1]
fh_write_html.write('<tr>\
<td width="300px"><h4><a href="{5}" target="_blank">{0}</a></h4></td>\
<td width="300px"><h4>{1}<br>{2}</h4></td>\
<td width="300px"><h4>{3}<br>{4}</h4></td></tr>\n'.\
format(name,
table[uid1_idx][1][item_name][0], print_stars(int(table[uid1_idx][1][item_name][3]), 5, table[uid1_idx][1][item_name][4], item_url),
table[uid2_idx][1][item_name][0], print_stars(int(table[uid2_idx][1][item_name][3]), 5, table[uid2_idx][1][item_name][4], item_url),
item_url))
fh_write_html.write("</table>\n")
return len(common_set)
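# main(): read one job from ./wait_queue, fetch or load both users' movie, music
# and book collections, then write an HTML page comparing them.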
def main():
global people_list
global fh_write_html
    # Read one tab-separated job line from ./wait_queue.
    with open('./wait_queue', encoding='utf8') as fa:
        args = fa.readline().rstrip('\n').split('\t')
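    # Field layout of the wait_queue line, inferred from the indexes used below:
    # args[0] user1 id, args[1] user1 nickname, args[2] user2 id, args[3] user2
    # nickname, args[5] short URL of the result page, args[6] path of the HTML
    # file to write; args[4] is not used in this script.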
user1 = args[0]
nickname1 = args[1]
icon1 = get_user_icon(user1)
user2 = args[2]
nickname2 = args[3]
icon2 = get_user_icon(user2)
short_url = args[5]
html_page_path = args[6]
#disable icon for now
#icon1 = '/imgs/empty.png'
#icon2 = '/imgs/empty.png'
fh_write_html = open(html_page_path, "w", encoding = "utf8")
fh_write_html.write("<html>\n")
fh_write_html.write("<head>\n")
fh_write_html.write('<meta http-equiv="content-type" content="text/html; charset=UTF-8"/>\n')
fh_write_html.write("<title>Result</title>\n")
    if CSS:
fh_write_html.write('<link href="styles.css" rel="stylesheet" type="text/css" />\n')
fh_write_html.write('<!--[if gte IE 6.]>\n')
fh_write_html.write('<script defer type="text/javascript" src="pngfix.js"></script>\n')
fh_write_html.write('<![endif]-->\n')
fh_write_html.write("</head>\n")
fh_write_html.write("<body>\n")
people_list.append([user1, nickname1, icon1])
people_list.append([user2, nickname2, icon2])
    # Thread part: a small pool of daemon workers downloads every collection
    # that is not already cached on disk.
    dl_queue = queue.Queue()
    for i in range(5):
        worker = Worker(dl_queue)
        worker.daemon = True
        worker.start()
    for people in people_list:
        for cat in ('movie', 'music', 'book'):
            file_path = ROOTDIR + r'htdocs/cache_datas/' + 'cache_' + people[0] + '_' + cat
            if not os.access(file_path, os.R_OK):
                # No readable cache file: download this collection and wait for it.
                dl_queue.put([people[0], cat, 1, dl_queue])
                dl_queue.join()
                if dl.EXIT:
                    # dl flagged a download failure; emit a minimal error page and stop.
                    fh_write_html.write("<h4>Download failed: dl.EXIT = " + str(dl.EXIT) + "</h4>\n")
                    fh_write_html.write("<h4>转发本页地址:{0}</h4>\n".format(short_url))
                    fh_write_html.write("</body>\n")
                    fh_write_html.write("</html>\n")
                    print('New page:', short_url)
                    return
                cache.cache_save(ROOTDIR + r'htdocs/cache_datas/' + people[0] + '_' + cat + '_1')
                get_user_collection(people, cat)
            else:
                # Cached data found: append '(c)' to the nickname, presumably to
                # mark that this user's data came from the local cache.
                if not people[1].endswith('(c)'):
                    people[1] += '(c)'
                get_user_collection(people, cat)
rate = []
for cat in ('movie', 'music', 'book'):
rate.append(get_match_rate(0, 1, cat))
    fh_write_html.write('<h4>你和<a href="{0}"><img src="{2}" />{1}</a>都喜欢:</h4>'.format('http://www.douban.com/people/' + user2, nickname2, icon2))
    # The heading above reads "You and <nickname2> both like:"; 部/张/本 below are
    # the Chinese measure words for movies, albums and books.
    fh_write_html.write('<h4>{0}{1} {2}{3} {4}{5}</h4>\n'.format(rate[0], '部电影', rate[1], '张音乐', rate[2], '本书籍'))
    fh_write_html.write('<!-- saying:nn\t{0}\t{1}\t{2}\t{3}\t{4}\t -->\n'.format(user2, nickname2, *rate))
    fh_write_html.write('<h4>转发本页地址:{0}</h4>\n'.format(short_url))  # "share this page at: <short_url>"
fh_write_html.write('</body>\n')
fh_write_html.write('</html>\n')
fh_write_html.close()
print('New page:', short_url)
return
if __name__ == '__main__':
    main()