-
Notifications
You must be signed in to change notification settings - Fork 4
/
scrape_list.py
77 lines (67 loc) · 3.93 KB
/
scrape_list.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 16:32:46 2018
@author: sqian
"""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup as soup
from bs4 import SoupStrainer
from lxml import html
import requests
import numpy as np
import time
import pandas as pd
import win32com.client as win32
# Launch a single Chrome instance; it is reused by the main scraping loop below.
browser = webdriver.Chrome()

# One-off smoke test ("test for once"): load the first catalogue page and parse
# it, confirming the JavaScript-rendered results appear before the full crawl.
page = 0
probe_url = "https://datacatalog.worldbank.org/search/indicators?search_api_views_fulltext_op=AND&query=&nid=&sort_by=changed&sort_order=DESC&0=type%3Aindicators&page=0%2C{0}&f[0]=field_wbddh_data_type%3A293&f[1]=type%3Aindicators".format(page)
browser.get(probe_url)
time.sleep(5)  # give the client-side rendering time to populate the result list
rendered_body = browser.execute_script("return document.body.innerHTML")
tree = html.fromstring(rendered_body)
# One XPath template per CSV column; {0} is the 1-based row index on the page.
# NOTE: the original title XPath used the predicate [starts-with(@id,"node"),1],
# which is invalid XPath 1.0 (comma inside a predicate) — lxml raised
# XPathEvalError on every row and the bare `except:` silently turned every
# title into ''.  Fixed to [starts-with(@id,"node")][1].
_FIELD_XPATHS = {
    'title': '//div[contains(@class,"views-row-{0} ")]/div[starts-with(@id,"node")][1]/h2/a/text()',
    'period': '//div[contains(@class,"views-row-{0} ")]/div[starts-with(@id,"node")]/div[2]/span/span[text()=" Periodicity:"]/following-sibling::b/text()',
    'database': '//div[contains(@class,"views-row-{0} ")]/div[starts-with(@id,"node")]/div[2]/span/span[text()="Dataset:"]/following-sibling::b/text()',
    'source': '//div[contains(@class,"views-row-{0} ")]/div[starts-with(@id,"node")]/div[2]/span/span[text()="Source:"]/following-sibling::b/text()',
    'last_update': '//div[contains(@class,"views-row-{0} ")]/div[starts-with(@id,"node")]/div[2]/span/span[text()="Last Updated:"]/following-sibling::b/text()',
}

# Fields whose extracted text carries surrounding whitespace on the site.
_STRIP_FIELDS = ('database', 'source')


def _extract_row(tree, i):
    """Extract one search-result row (1-based index ``i``) from a parsed page.

    Returns a dict keyed by CSV column name.  Each value is the (possibly
    empty) list of strings matched by the column's XPath, or '' when the
    XPath evaluation itself fails.
    """
    row = {}
    for field, template in _FIELD_XPATHS.items():
        try:
            values = tree.xpath(template.format(i))
        except Exception:  # was a bare except:, which also hid XPath errors
            values = ''
        if field in _STRIP_FIELDS and values != '':
            values = [v.strip() for v in values]
        row[field] = values
    return row


_COLUMNS = ['title', 'period', 'database', 'source', 'last_update']
_TOTAL_PAGES = 1606    # pages 0..1605, 10 results each
_TOTAL_RESULTS = 16056  # presumably the catalogue's total hit count — TODO confirm

for page in range(0, _TOTAL_PAGES):
    url = "https://datacatalog.worldbank.org/search/indicators?search_api_views_fulltext_op=AND&query=&nid=&sort_by=changed&sort_order=DESC&0=type%3Aindicators&page=0%2C{0}&f[0]=field_wbddh_data_type%3A293&f[1]=type%3Aindicators".format(page)
    browser.get(url)
    # NOTE(review): unlike the initial probe there is no explicit wait here;
    # this relies on browser.get() blocking until the page has rendered.
    inner_html = browser.execute_script("return document.body.innerHTML")
    tree = html.fromstring(inner_html)
    # Full pages yield rows 1..10; the last page holds the remainder.
    row_num = min(11, _TOTAL_RESULTS - 10 * page + 1)
    # Build all rows first, then construct the frame once — DataFrame.append
    # was deprecated (removed in pandas 2.x) and quadratic in a loop.
    rows = [_extract_row(tree, i) for i in range(1, row_num)]
    data_temp = pd.DataFrame(rows, columns=_COLUMNS)
    if page == 0:
        # First page creates the file and writes the header row.
        data_temp.to_csv('indicators_WBG.csv', index=False, header=True)
    else:
        # Subsequent pages append without repeating the header.
        data_temp.to_csv('indicators_WBG.csv', mode='a', index=False, header=False)
    # Divide by the true page count (1606); the original divided by 1605,
    # reporting >100% on the final page.
    print('Page {0} Done! {1:.2f}% Finished!'.format(page + 1, (page + 1) / _TOTAL_PAGES * 100))