main.py
import threading
from queue import Queue
from spider import Spider
from domain import *
from general import *
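# spider, domain, and general are local modules, presumably spider.py,
# domain.py, and general.py alongside this file in the repo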
PROJECT_NAME = input('What would you like to call your project: ')  # All-caps names mark module-level constants by convention
HOMEPAGE = input('Enter the URL of the website you would like to crawl: ')
DOMAIN_NAME = getDomainName(HOMEPAGE)
QUEUE_FILE = 'projects/' + PROJECT_NAME + '/queue.txt'
CRAWLED_FILE = 'projects/' + PROJECT_NAME + '/crawled.txt'
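# queue.txt tracks URLs still to crawl and crawled.txt the URLs already
# visited; the fileToSet() calls below suggest plain text, one entry per line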
NUMBER_OF_THREADS = 8  # Tune to your machine and the target site; each thread issues requests concurrently
queue = Queue()
Spider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)
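# Constructing the first Spider presumably bootstraps the project: creating
# the projects/<PROJECT_NAME> folder and seeding queue.txt with HOMEPAGE
# (the exact behaviour lives in spider.py, not shown here)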
# Create worker threads (they die when the main thread exits)
def createWorkers():
    for _ in range(NUMBER_OF_THREADS):  # _ is the conventional name for an unused loop variable
        t = threading.Thread(target=work)
        t.daemon = True  # Daemon threads are killed when the main process exits
        t.start()
# Do the next job in the queue
def work():
    while True:
        url = queue.get()
        Spider.crawlPage(threading.current_thread().name, url)
        queue.task_done()
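# Each worker blocks on queue.get() until a URL is available; task_done()
# tells the queue.join() call in createJobs() that the job has finished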
# Each queued link is a new job
def createJobs():
    for link in fileToSet(QUEUE_FILE):
        queue.put(link)
    queue.join()
    crawl()
# If there are links left in the queue, crawl them
def crawl():
    queueLinks = fileToSet(QUEUE_FILE)
    if len(queueLinks) > 0:
        print(str(len(queueLinks)) + ' links in the queue')
        createJobs()
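# Note the mutual recursion: crawl() calls createJobs(), which calls crawl()
# again after queue.join(), so crawling continues until queue.txt is empty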
createWorkers()
crawl()
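# A hypothetical session (output values are illustrative, not from a real run):
#   $ python main.py
#   What would you like to call your project: demo
#   Enter the URL of the website you would like to crawl: https://example.com
#   12 links in the queue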