-
Notifications
You must be signed in to change notification settings - Fork 1
/
91hanman-down.py
59 lines (53 loc) · 2.18 KB
/
91hanman-down.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
# -*- coding:UTF-8 -*-
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
import requests
import os
import time
import pdb
def page_url(num):
    """Return the listing-page URL for 1-based page *num*.

    Page 1 is served as index.html; every later page as index_<num>.html.
    """
    if num == 1:
        return 'http://www.shuaia.net/index.html'
    return 'http://www.shuaia.net/index_%d.html' % num


if __name__ == '__main__':
    # Shared request headers: present a desktop Chrome User-Agent so the
    # site serves the normal HTML pages. Defined once instead of being
    # re-built inside every loop iteration as the original did.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
    }

    # Phase 1: collect (title, article-url) pairs from the first two index pages.
    # Stored as tuples instead of the original "title=href" joined string:
    # splitting that string back on '=' would truncate any URL that itself
    # contains an '=' character.
    targets = []
    for num in range(1, 3):
        url = page_url(num)
        req = requests.get(url=url, headers=headers)
        req.encoding = 'utf-8'
        bf = BeautifulSoup(req.text, 'lxml')
        # Each listing entry is an element with class "item-img" wrapping an
        # <img> whose alt text is the article title; its href is the article page.
        for each in bf.find_all(class_='item-img'):
            targets.append((each.img.get('alt'), each.get('href')))
    print('连接采集完成')

    # BUG FIX: the original script called exit() here, so the download loop
    # below was dead code and no image was ever fetched. Two leftover
    # pdb.set_trace() debugger stops were removed for the same reason.

    # Create the output directory once, up front, instead of probing
    # os.listdir() on every image inside the inner loop.
    os.makedirs('images', exist_ok=True)

    # Phase 2: visit each article page and download every image in it.
    for filename, target_url in targets:
        print('下载:' + filename)
        img_req = requests.get(url=target_url, headers=headers)
        img_req.encoding = 'utf-8'
        img_bf = BeautifulSoup(img_req.text, 'lxml')
        # Article images live inside <div class="wr-single-content-list">;
        # re-parse that fragment and pull out all <img> tags.
        content = img_bf.find_all('div', class_='wr-single-content-list')
        imgs = BeautifulSoup(str(content), 'lxml').find_all('img')
        for i, each_img in enumerate(imgs, start=1):
            img_url = each_img.get('src')
            # Same naming scheme as the original: <title><1-based index>.jpg
            urlretrieve(url=img_url, filename='images/' + filename + str(i) + '.jpg')
            time.sleep(1)  # be polite: at most one download per second
    print('下载完成!')