# -*- coding: utf-8 -*-
import sys, re
import requests
import queue
import pickle
import string
import os
import networkx as nx
import multiprocessing as mp
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import config
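# The config module is not included here; the sketch below only lists the attributes
# this script reads from it. The example values are assumptions (a MediaWiki-style
# site), not the project's actual settings.
#
#   domain = "https://en.wikipedia.org"            # site root, no trailing slash
#   start_page = domain + "/wiki/Main_Page"        # seed URL for the crawl
#   article_pattern = r"/wiki/([^/#?]+)$"          # regex whose group(1) is the article name
#   ban_prefix = ("mailto:", "javascript:")        # href prefixes to skip
#   ban_sufix = (".jpg", ".png", ".svg", ".pdf")   # href suffixes to skip
#   wiki_ignored_pages = ("Special:", "File:")     # URL fragments to ignore entirely
#   wiki_utility_pages = ("Category:", "Portal:")  # URL fragments routed to the utility crawler
#   article_crawlers = 4                           # number of ArticleCrawler processes
#   utility_crawlers = 2                           # number of UtilityCrawler processes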
# Whitelist of characters considered safe for dump file names (currently unused).
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
visited = set()  # currently unused; deduplication happens inside UrlMaster

# Inter-process queues connecting the crawler processes.
link_queue = mp.Queue()     # raw (url, parent_name) pairs found on pages
article_queue = mp.Queue()  # article URLs to fetch and parse
utility_queue = mp.Queue()  # category/utility pages crawled only for their links
edge_queue = mp.Queue()     # (parent, child) article pairs for the link graph
data_queue = mp.Queue()     # (name, html) payloads to write to disk
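# Process layout, as implemented below:
#   UrlMaster      pulls from link_queue, filters and deduplicates URLs, and routes
#                  them to article_queue, utility_queue and edge_queue.
#   ArticleCrawler pulls from article_queue, downloads the page, pushes its body to
#                  data_queue and its outgoing links back to link_queue.
#   UtilityCrawler pulls from utility_queue and only feeds links back to link_queue.
#   GraphMaster    consumes edge_queue into a networkx DiGraph and pickles it.
#   DataKeeper     consumes data_queue and writes article bodies under cc_data/.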
def get_article_name(url):
    if url == config.domain or url == config.domain + "/":
        return "Main_Page"
    m = re.search(config.article_pattern, url)
    if m:
        return m.group(1)
def get_request(url):
    try:
        req = requests.get(url)
        return req
    except KeyboardInterrupt:
        raise
    except Exception:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print("url open problem " + str(exc_type) + str(exc_obj) + str(exc_tb))
        print("in URL", url)
def get_true_url(url):
    try:
        req = requests.get(url, stream=True)
        return req.url
    except KeyboardInterrupt:
        raise
    except Exception:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print("url open problem " + str(exc_type) + str(exc_obj) + str(exc_tb))
        print("in URL", url)
def extract_links(soup, url):
    links = []
    for link in soup.findAll('a'):
        try:
            href = link['href']
        except KeyError:
            continue
        if any(href.startswith(prefix) for prefix in config.ban_prefix) or any(href.endswith(sufix) for sufix in config.ban_sufix):
            continue
        if href.startswith("/wiki/"):
            href = config.domain + href
        if (not href.startswith('http://')) and (not href.startswith('https://')):
            href = urljoin(url, href)
        if href.startswith(config.domain):
            if href == config.domain or href == config.domain + "/":
                href = config.start_page
            links.append(href)
    return links
def process_links(links, name):
    for link in links:
        link_queue.put((link, name))
def ArticleCrawler():
    print("article crawler spawned, pid is ", os.getpid())
    try:
        while True:
            url, parent_name = article_queue.get(timeout=150)
            req = get_request(url)
            if req is None:  # download failed; skip this URL
                continue
            name = get_article_name(req.url)
            #print(parent_name, " => ", name)
            soup = BeautifulSoup(req.text, "html.parser")
            links = extract_links(soup, url)
            data_queue.put((name, str(soup.find("div", {"id": "bodyContent"}))))
            process_links(links, name)
    except queue.Empty:
        pass
    print("article crawler stopping")
def UtilityCrawler():
    print("utility crawler spawned, pid is ", os.getpid())
    try:
        while True:
            url = utility_queue.get(timeout=150)
            req = get_request(url)
            if req is None:  # download failed; skip this URL
                continue
            soup = BeautifulSoup(req.text, "html.parser")
            links = extract_links(soup, url)
            process_links(links, "")
    except queue.Empty:
        pass
    print("utility crawler stopping")
def UrlMaster():
    print("url master spawned, pid is ", os.getpid())
    processed = set([config.start_page])
    processed_articles = set(["Main_Page"])
    redirect_table = {}
    try:
        while True:
            url, parent_name = link_queue.get(timeout=150)
            #print(url, parent_name)
            url = url.strip().split("#")[0]
            if config.domain not in url or any(page in url for page in config.wiki_ignored_pages):
                continue
            elif any(page in url for page in config.wiki_utility_pages):
                if url in processed:
                    continue
                processed.add(url)
                utility_queue.put(url)
            elif "/wiki/" in url:
                if url in redirect_table:
                    url = redirect_table[url]
                else:
                    # Redirect resolution is currently a stub: each URL maps to itself.
                    new_url = url
                    redirect_table[url] = new_url
                    url = new_url
                #print(url)
                name = get_article_name(url)
                if parent_name != "":
                    edge_queue.put((parent_name, name))
                if name not in processed_articles:
                    processed_articles.add(name)
                    #print(len(processed_articles))
                    article_queue.put((url, parent_name))
            else:
                print(url)  # debug
    except queue.Empty:
        pass
    print("Url master stopping")
    print("Acknowledged articles", len(processed_articles))
def GraphMaster():
    print("graph master spawned, pid is ", os.getpid())
    G = nx.DiGraph()
    prev_amount = 0
    try:
        while True:
            parent, child = edge_queue.get(timeout=150)
            G.add_edge(parent, child)
            if len(G.nodes()) % 1000 == 0 and len(G.nodes()) != prev_amount:
                prev_amount = len(G.nodes())
                print(len(G.nodes()), "nodes in graph; ", len(G.edges()), "edges in graph; ",
                      article_queue.qsize(), "articles in queue; ",
                      utility_queue.qsize(), "utility queue size; ",
                      edge_queue.qsize(), "graph queue size")
    except queue.Empty:
        pass
    with open("Graph", "wb") as f:
        pickle.dump(G, f)
    print("graph master finishing...")
def DataKeeper():
    print("data keeper spawned, pid is ", os.getpid())
    counter = 0
    try:
        while True:
            name, article = data_queue.get(timeout=150)
            # Assumes the cc_data/ directory already exists.
            with open("cc_data/" + str(name) + ".dump", "w") as f:
                f.write(article)
            counter += 1
            if counter % 100 == 0:
                print(counter, "events saved")
    except queue.Empty:
        pass
    print("data keeper finalizing...")
    print(str(counter) + " articles collected")
if __name__ == '__main__':
    article_workers = []
    for i in range(config.article_crawlers):
        task = mp.Process(target=ArticleCrawler)
        task.start()
        article_workers.append(task)
    utility_workers = []
    for i in range(config.utility_crawlers):
        task = mp.Process(target=UtilityCrawler)
        task.start()
        utility_workers.append(task)
    data_master = mp.Process(target=DataKeeper)
    data_master.start()
    graph_master = mp.Process(target=GraphMaster)
    graph_master.start()
    url_master = mp.Process(target=UrlMaster)
    url_master.start()
    # Seed the crawl with the configured start page.
    article_queue.put((config.start_page, "#Main"))
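    # Note: there is no explicit shutdown signal. Each process exits once its input
    # queue has been empty for 150 seconds (the get(timeout=150) calls above), so the
    # pipeline winds down on its own after the crawl dries up; the main process does
    # not join the workers.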