-
Notifications
You must be signed in to change notification settings - Fork 2
/
WebScrapper.py
135 lines (108 loc) · 3.77 KB
/
WebScrapper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
from pandas.core.dtypes.missing import notnull
import requests
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
#https://bscscan.com/address/0xcC64ea842FcDe4283CF239259f7462Ef809c44FD
import webbrowser
# Startup marker: confirms the script was loaded/executed before any scraping runs.
print("RUNNING")
def getTokenBalance(whale):
    """Scrape bscscan.com for the available (USD) balance of a wallet address.

    Parameters
    ----------
    whale : str
        Wallet address (hex string) appended to the bscscan address URL.

    Returns
    -------
    float or None
        Balance parsed from the `availableBalanceDropdown` element, or
        None when that element is missing from the page.
    """
    url = "https://bscscan.com/address/" + whale
    # bscscan rejects the default urllib User-Agent, so spoof a browser.
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    page = urlopen(req)
    soup = BeautifulSoup(page, 'html.parser')
    getId = soup.find(id='availableBalanceDropdown')
    if getId is None:
        # BUG FIX: the original printed this warning and then dereferenced
        # getId anyway, raising AttributeError. Bail out instead.
        print("COULDNT RETRIEVE BALANCE")
        return None
    results = getId.text
    # The dropdown text embeds a badge span (token count) whose text must
    # be removed before the dollar amount can be parsed.
    extra = getId.find_all('span', class_='badge badge-primary mx-1')
    done = extra[0].text
    final = results.replace(done, '')
    # One C-level translate pass replaces the original's four chained
    # .replace() calls (strip newlines, '$', and thousands separators).
    final = final.translate(str.maketrans({'\n': None, '$': None, ',': None}))
    return float(final)
#print("ALl good")
#print(getTokenBalance("0x2c46b8fdcbe827a814da412ff1ebdc2544e683c1"))
#print("ALL GOOD 2")
def getTxnValue(hash, action):
    """Scrape a bscscan.com transaction page for its USD value.

    Parameters
    ----------
    hash : str
        Transaction hash appended to the bscscan tx URL. (NOTE: the name
        shadows the builtin `hash`; kept unchanged for caller compatibility.)
    action : str
        "BUY" selects the second tooltip value found on the page,
        "SELL" selects the last one.

    Returns
    -------
    float
        Parsed USD value, or 0.0 when fewer than two tooltip values are
        found or the action is not "BUY"/"SELL".
    """
    url = "https://bscscan.com/tx/" + hash
    # bscscan rejects the default urllib User-Agent, so spoof a browser.
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    page = urlopen(req).read()
    soup = BeautifulSoup(page, 'html.parser')
    # Each candidate value lives in a tooltip nested inside a span.mr-1.
    responseArray = []
    for result in soup.find_all('span', class_='mr-1'):
        # IDIOM FIX: the original kept a manual `index` counter and
        # re-indexed the result list; the loop variable already IS the
        # element. Also use truthiness instead of `a != []`.
        tooltips = result.find_all(attrs={"data-toggle": "tooltip"})
        if tooltips:
            responseArray.append(tooltips[0].text)
    if len(responseArray) <= 1:
        # Not enough values scraped — same 0.0 fallback as the original.
        return 0.0
    if action == "BUY":
        return Txn_Value_Cleaner(responseArray[1])
    if action == "SELL":
        return Txn_Value_Cleaner(responseArray[-1])
    # Unrecognised action — original also returned 0.0 here.
    return 0.0
def Txn_Value_Cleaner(input_):
    """Extract the float that follows the first '$' in a scraped value string.

    Input looks like "x.xx BNB ($1,234.56)": everything after the first
    '$' is taken, thousands separators and the closing ')' are stripped,
    and the remainder is parsed as a float.

    Parameters
    ----------
    input_ : str
        Raw scraped text containing a dollar amount.

    Returns
    -------
    float or None
        Parsed value, or None when the string contains no '$' (the
        original implementation fell off the end of its loop and
        returned None implicitly; this makes that explicit).
    """
    # IDIOM FIX: str.partition replaces the original's manual
    # character-by-character index scan for '$'.
    _, sep, tail = input_.partition('$')
    if not sep:
        return None
    # Single translate pass removes ',' and ')' before parsing.
    return float(tail.translate(str.maketrans({',': '', ')': ''})))
#print("SELL EXAMPLE-------------")
#print(type(getTxnValue('0x1a0dad3ca8c0612b4828bf22677617647f7fe00d09ee4890da0f79ad23064880')))
#print(getTxnValue('0x4998f506289d95b50fb5f8bc91da0b47bd0a091ad1837e5119a9e4df55b9b049',"SELL"))
#print("ARRAY: ------------")
#print(responseArray)