scraper.py
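"""Scraper for Danish public tender notices on udbud.dk.

Drives a headless PhantomJS browser through the paginated listing at
http://udbud.dk/Pages/Tenders/News, follows each tender link, and saves one
row per tender to a ScraperWiki SQLite table keyed on the tender ID.
(Python 2 code: it relies on ``urllib.urlopen`` and ``unicode``.)
"""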
import random
import time
from urllib import urlopen  # Python 2 standard library

import scraperwiki
from bs4 import BeautifulSoup
from splinter import Browser


def get_Id(url):
    """Return the tender ID from a udbud.dk link of the form ``...?Id=<id>``."""
    return url.split('=')[1]


def dateclean(date):
    """Reorder the quoted ISO date in a deadline string to DD-MM-YYYY."""
    quoted = date.split('"')[1]  # text between the double quotes
    a = quoted.split('-')        # ['YYYY', 'MM', 'DD']
    return a[2] + '-' + a[1] + '-' + a[0]
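
# For instance, assuming the deadline text contains a quoted ISO date such as
# '... "2015-03-01" ...', dateclean() returns '01-03-2015'.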


def scrap(url):
    """Scrape a single tender page and save one row to the SQLite store."""
    response = urlopen(url)
    htmltext = BeautifulSoup(response, "html.parser")
    tender_id = get_Id(url)
    # The title sits in the <h2> right after the page-title block; the body
    # text lives in the <p> tags that follow it.
    title_block = htmltext.find('div', {"class": "mod page-title"})
    Title = title_block.findNext('h2').text
    Text = title_block.findAll('p')
    Textfinal = ""
    for p in Text:
        Textfinal = Textfinal + p.text
    # Not every tender page shows a deadline.
    try:
        Deadline = htmltext.find('div', {"class": "tbHeader"}).findNext('h4').text
        Deadline_clean = dateclean(Deadline)
    except (AttributeError, IndexError):
        Deadline = ""
        Deadline_clean = ""
    # The metadata table has one "rightTd" cell per field, in a fixed order.
    table = htmltext.findAll('td', {"class": "rightTd"})
    Udbudstype = table[0].text
    Opgavetype = table[1].text
    Tildelingskriterier = table[2].text
    Ordregiver = table[3].text
    Adresse = table[4].text
    CPV_kode = table[5].text
    Udbudsform = table[6].text
    # Some pages omit the "SMV venligt" cell, and some omit the contact
    # cells as well, so fall back field by field.
    try:
        SMV_venligt = table[7].text
        Kontaktperson = table[8].text
        Kontakt = table[9].text
    except IndexError:
        try:
            SMV_venligt = ""
            Kontaktperson = table[7].text
            Kontakt = table[8].text
        except IndexError:
            SMV_venligt = ""
            Kontaktperson = ""
            Kontakt = ""
    data = {"ID": unicode(tender_id),
            "Url": unicode(url),
            "Title": unicode(Title),
            "Deadline": unicode(Deadline),
            "Deadline clean": unicode(Deadline_clean),
            "Udbudstype": unicode(Udbudstype),
            "Opgavetype": unicode(Opgavetype),
            "Tildelingskriterier": unicode(Tildelingskriterier),
            "Ordregiver": unicode(Ordregiver),
            "Adresse": unicode(Adresse),
            "CPV kode": unicode(CPV_kode),
            "Udbudsform": unicode(Udbudsform),
            "SMV venligt": unicode(SMV_venligt),
            "Kontaktperson": unicode(Kontaktperson),
            "Kontakt": unicode(Kontakt)}
    scraperwiki.sqlite.save(unique_keys=['ID'], data=data)


def redondance(l):
    """Return True if the list contains any duplicate entry."""
    return len(l) != len(set(l))


def suppredon(l):
    """Return a copy of the list with duplicates removed, order preserved."""
    l1 = []
    for el in l:
        if el not in l1:
            l1.append(el)
    return l1
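
# Navigation() uses these helpers together: redondance(['a', 'b', 'a']) is
# True, which signals that the pager has wrapped around to an already-seen
# page, and suppredon() then strips the repeats from the collected links.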


def Navigation(link):
    """Collect every tender URL from the paginated listing at ``link``."""
    with Browser("phantomjs", service_args=['--ignore-ssl-errors=true',
                                            '--ssl-protocol=any']) as browser:
        browser.driver.set_window_size(1280, 1024)
        browser.visit(link)
        time.sleep(random.uniform(0.5, 2.9))
        href = []
        htmltext = BeautifulSoup(browser.html, "html.parser")
        soup = htmltext.find('table', {"id": "datagridtenders_1F8CBE3E"}).findNext('tbody')
        links = soup.findAll('a')
        # Every second anchor in the grid is a tender link.
        for i in range(0, len(links), 2):
            href.append("http://udbud.dk" + links[i].get('href'))
        button = 1
        try:
            while button:
                time.sleep(random.uniform(0.5, 2.9))
                # Click through to the next page and collect its links too.
                button = browser.find_by_id('datagridtenders_1F8CBE3E_next')
                button.click()
                htmltext = BeautifulSoup(browser.html, "html.parser")
                soup = htmltext.find('table', {"id": "datagridtenders_1F8CBE3E"}).findNext('tbody')
                links = soup.findAll('a')
                for i in range(0, len(links), 2):
                    href.append("http://udbud.dk" + links[i].get('href'))
                # Repeated links mean the pager has looped back: stop.
                if redondance(href):
                    button = 0
        except Exception:
            # The "next" button disappears (or the click fails) on the last page.
            pass
        return suppredon(href)


def main():
    urls = ['http://udbud.dk/Pages/Tenders/News']
    for link in urls:
        href = Navigation(link)
        for i in href:
            # Skip individual tender pages that fail to parse.
            try:
                scrap(i)
            except Exception:
                pass


if __name__ == '__main__':
    main()
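
# A minimal usage sketch, assuming Python 2 with beautifulsoup4, splinter and
# the scraperwiki package installed, plus a PhantomJS binary on the PATH:
#
#   python scraper.py
#
# scraperwiki.sqlite.save() writes the rows to a local SQLite database
# (scraperwiki.sqlite by default), one row per tender, keyed on "ID".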