# -*- coding: UTF-8 -*-

import requests
from bs4 import BeautifulSoup
import string


word = input('Enter the name of the resource to search for: ')

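# Impersonate a desktop Chrome browser; many sites reject requests' default User-Agent.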
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'
}

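# Fetch up to 10 pages of search results, collecting name/date/url for each hit.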
result_list = []
for i in range(1, 11):
    print('Searching page {}'.format(i))
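    # Query-string parameters for the site's search endpoint.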
    params = {
        'page': i,
        'keyword': word,
        'search_folder_or_file': 0,
        'is_search_folder_content': 0,
        'is_search_path_title': 0,
        'category': 'all',
        'file_extension': 'all',
        'search_model': 2
    }
    response_html = requests.get('https://www.alipanso.com/search.html', headers=headers, params=params)
    response_data = response_html.content.decode()

    soup = BeautifulSoup(response_data, "html.parser")
    divs = soup.find_all('div', class_='resource-item border-dashed-eee')

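    # No result cards on this page: we have paged past the last hit, so stop.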
    if not divs:
        break

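    # The first card is skipped; only the cards after it are treated as results.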
    for div in divs[1:]:
        p = div.find('p', class_='em')
        # A card without the date paragraph is not a search result; stop scanning this page.
        if p is None:
            break

        download_url = 'https://www.alipanso.com/' + div.a['href']
        date = p.text.strip()
        name = div.a.text.strip()
        result_list.append({'date': date, 'name': name, 'url': download_url})

    # Nothing collected at all, so further pages will not help; stop early.
    if not result_list:
        break

# Newest resources first.
result_list.sort(key=lambda k: k.get('date'), reverse=True)
print(result_list)

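# aliso.html is a string.Template body; $elements is replaced with the str() of
# result_list. A minimal template (an assumption, the file is not shown here) could be:
#   <html><body><pre>$elements</pre></body></html>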
with open("aliso.html", encoding='utf-8') as t:
    template = string.Template(t.read())

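# Substitute the collected results into the template and write the final report.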
final_output = template.substitute(elements=result_list)
with open("report.html", "w", encoding='utf-8') as output:
    output.write(final_output)