ville_ideale_scraping.py

import requests
import urllib.request
import re
from lxml import html
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, OperatingSystem
from random import random
from typing import List, Dict


class proxyExecution:
    def __init__(self, url: str = None, save_path: str = "../good_data/web_scraping/proxy_list.txt"):
        if url is None:
            self.url = "https://raw.githubusercontent.com/clarketm/proxy-list/master/proxy-list-raw.txt"
        else:
            self.url = url
        self.save_path = save_path
        self.listed_proxy = []
        self.good_prox = []

    def __collect_proxy_list(self) -> None:
        # Download the raw proxy list and split it into one entry per line
        urllib.request.urlretrieve(self.url, self.save_path)
        with open(self.save_path, "r") as f:
            self.listed_proxy = f.read().split("\n")

    def collect(self) -> List[str]:
        self.__collect_proxy_list()
        return self.listed_proxy

    def __transform_proxy_http(self, proxy: str) -> str:
        return "http://" + proxy
    def __test_proxy_list(self, num: int = 20) -> None:
        # DO NOT RUN THIS IF YOU WANT TO SAVE TIME
        # For simplicity, we only keep proxies listening on port 8080
        pat = re.compile(r".:8080$")
        proxies_list = [p for p in self.listed_proxy if pat.search(p)]
        # We generate a random User-Agent for each proxy; the rotator itself
        # only needs to be built once
        software_names = [SoftwareName.CHROME.value]
        operating_systems = [OperatingSystem.WINDOWS.value, OperatingSystem.LINUX.value]
        user_agent_rotator = UserAgent(software_names=software_names,
                                       operating_systems=operating_systems, limit=100)
        for prox in proxies_list:
            if len(self.good_prox) < num:
                try:
                    print(prox)
                    proxies = {"http": self.__transform_proxy_http(prox),
                               "https": self.__transform_proxy_http(prox)}
                    headers = {"User-Agent": user_agent_rotator.get_random_user_agent()}
                    r = requests.Session()
                    r.headers.update(headers)
                    r.proxies.update(proxies)
                    # Connect to a test page (I found azlyrics in a past project,
                    # which is in my opinion a good site for testing)
                    page = r.get("https://www.azlyrics.com/", timeout=10)
                    page.raise_for_status()
                    # If the connection succeeds, keep this proxy
                    self.good_prox.append(prox)
                except requests.RequestException:
                    # If I cannot connect with this proxy, try the next one
                    print("Not Good")
                    continue
            else:
                # Stop the selection once we have {num} good proxies
                break
        print("End")

    def test(self) -> List[str]:
        self.__test_proxy_list()
        return self.good_prox
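

# Minimal usage sketch for proxyExecution. This helper is an illustrative
# assumption, not part of the original script: it collects the raw list from
# the default GitHub URL, then keeps up to 20 proxies (the default cap in
# __test_proxy_list) that answered on the test page.
def demo_proxy_collection() -> None:
    collector = proxyExecution()
    all_proxies = collector.collect()  # full raw list from the proxy-list repo
    working = collector.test()         # proxies that passed the connection test
    print(len(all_proxies), "candidates,", len(working), "working proxies")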


class villeIdealScraper:
    def __init__(self):
        # NOTE: the path is hard-coded to the 9th-arrondissement page; only the
        # postal-code suffix is generated from the requested arrondissement
        self.base_url = "https://www.ville-ideale.fr/paris-9e-arrondissement"
        self.r = None
        self.proxy_list = None
        self.random_proxy_list = None
        self.page = None

    def __generate_random_proxy(self) -> str:
        if self.proxy_list is None:
            try:
                collect_proxy = proxyExecution()
                self.proxy_list = collect_proxy.collect()
            except ValueError:
                print("Can't find the proxy list. You must provide one manually")
        # Shuffle the list and take the first entry
        self.random_proxy_list = sorted(self.proxy_list, key=lambda x: random())
        return self.random_proxy_list[0]

    def __transform_proxy_http(self, proxy: str) -> str:
        return "http://" + proxy

    def __generate_random_agent(self) -> None:
        software_names = [SoftwareName.CHROME.value]
        operating_systems = [OperatingSystem.WINDOWS.value, OperatingSystem.LINUX.value]
        user_agent_rotator = UserAgent(software_names=software_names,
                                       operating_systems=operating_systems, limit=100)
        headers = {"User-Agent": user_agent_rotator.get_random_user_agent()}
        self.r = requests.Session()
        self.r.headers.update(headers)

    def __scrap_invisible(self) -> None:
        # Attach a random proxy and a random User-Agent to a fresh session
        proxy = self.__generate_random_proxy()
        self.__generate_random_agent()
        proxy_http_format = {"http": self.__transform_proxy_http(proxy),
                             "https": self.__transform_proxy_http(proxy)}
        self.r.proxies.update(proxy_http_format)

    def __generate_good_url(self, arr: int) -> str:
        # Paris postal codes are 751XX, where XX is the zero-padded arrondissement
        return self.base_url + "_751{}".format(str(arr).zfill(2))
    # @tailrec: retries recursively with a new random proxy until a request succeeds
    def __activate_valid_scraper(self, url: str, verbose: bool = True) -> None:
        try:
            self.__scrap_invisible()
            self.page = self.r.get(url, timeout=10)
        except requests.RequestException:
            if verbose:
                print("Not a good proxy")
            self.__activate_valid_scraper(url=url, verbose=verbose)
    def scrap(self, arr: int, anonymous: bool = True) -> Dict[str, List[str]]:
        # Splits each table row into a column name and its grade
        pattern = re.compile(r"(\D*)(\d\S\d*)")
        if anonymous:  # only anonymous scraping is implemented
            url = self.__generate_good_url(arr=arr)
            self.__activate_valid_scraper(url=url)
            tree = html.fromstring(self.page.content)
            global_note = tree.xpath('//p[@id="ng"]/text()')
            table_note = tree.xpath('//table[@id="tablonotes"]//tr')
            dict_result = {"Arrondissement": [arr]}
            for t in table_note:
                row = t.text_content()
                list_result = [res for res in pattern.split(row) if res != '']
                dict_result[list_result[0]] = [list_result[1]]
            # Add the global grade
            dict_result["Note global"] = [global_note[0]]
            return dict_result
        else:
            print("Non-anonymous scraping is not implemented")
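

if __name__ == "__main__":
    # Illustrative entry point (an assumption, not part of the original script):
    # scrape the grade table for the 9th arrondissement through a random
    # proxy/User-Agent pair and print the resulting dictionary.
    scraper = villeIdealScraper()
    result = scraper.scrap(arr=9, anonymous=True)
    print(result)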