-
Notifications
You must be signed in to change notification settings - Fork 10
/
grabber.py
65 lines (52 loc) · 1.74 KB
/
grabber.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
#!/usr/bin/python
#coding:utf-8
import os # path manipulation
import urllib as urllib
import requests
# Module-level sentinel: grabber() flips this to 'done' when the API returns
# an empty page, which stops the download loop in main().
status = 'not done yet'
# change this to your danbooru folder
# it might look something like this: '/users/YourUserName/DanbooruPics'
# make sure the folder already exists!
# NOTE: keep the trailing slash — paths below are built by string concatenation.
danbooru_folder = '/Users/chaoguo/Pictures/'
# generate tag argument to be used in url and folder creation
def generate_tag_argv(tagList):
    """Join a list of tags into the '+'-separated string danbooru expects.

    tagList -- list of tag strings, e.g. ['touhou', 'solo']
    Returns 'touhou+solo'; an empty list yields ''.
    """
    # str.join replaces the original append-then-trim loop: same output for
    # every input (including the empty list), without the manual '+' bookkeeping.
    return '+'.join(tagList)
# request json, get urls of pictures and download them
def grabber(tag_argv, page_num):
    """Fetch one page of the danbooru posts API and download its pictures.

    tag_argv -- '+'-separated tag string (also used as the subfolder name)
    page_num -- 1-based page index of the posts listing

    Side effects: creates danbooru_folder/tag_argv if missing, writes the
    downloaded files there, and sets the module-level `status` flag to
    'done' once an empty page signals that all pages have been visited.
    """
    r = requests.get('https://danbooru.donmai.us/posts.json?tags='+tag_argv+'&page='+str(page_num))
    streams = r.json()
    # check if all pages have been visited: an empty page means we ran
    # past the last page of results
    if len(streams) == 0:
        print("All pictures have been downloaded!")
        global status
        status = 'done'
    else:
        # create the per-tag target directory on first use
        # (danbooru_folder ends with '/', so join matches the old concatenation)
        target_dir = os.path.join(danbooru_folder, tag_argv)
        if not os.path.exists(target_dir):
            os.mkdir(target_dir)
        # some posts (e.g. restricted ones) carry no 'file_url'; skip those
        paths = [post['file_url'] for post in streams if 'file_url' in post]
        for path in paths:
            address = 'https://danbooru.donmai.us' + path
            # NOTE(review): urllib.urlretrieve is Python 2 only; under
            # Python 3 this would be urllib.request.urlretrieve
            urllib.urlretrieve(address, os.path.join(target_dir, address.split('/')[-1]))
def main():
page_num = input('Enter the number of pages you want to download. To download all, simply enter a super large number:')
taginput = raw_input('Enter tags,separated by space:')
n = 1
while n <= page_num and status == 'not done yet':
tagList = taginput.split(' ')
tag_argv = generate_tag_argv(tagList)
grabber(tag_argv,n)
n = n + 1
print('Download successful!')
u2 = u'どうぞ、召し上がってください!'
print u2
if __name__ == '__main__':
main()