refactor
sdabhi23 committed Jan 30, 2024
1 parent 668d436 commit 211eb5f
Showing 6 changed files with 157 additions and 145 deletions.
2 changes: 1 addition & 1 deletion bsedata/exceptions.py
@@ -32,7 +32,7 @@ class InvalidStockException(Exception):
     :param status: the status of the stock as mentioned on BSE website
     """
 
-    def __init__(self, status: str="Inactive stock"):
+    def __init__(self, status: str = "Inactive stock"):
        if status == "":
            self.status = "Inactive stock"
        else:
13 changes: 5 additions & 8 deletions bsedata/gainers.py
@@ -24,22 +24,19 @@
 """
 
+from bsedata.helpers import COMMON_REQUEST_HEADERS
 from bs4 import BeautifulSoup as bs
 import requests
 
-headers = {
-    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36 Edg/83.0.478.45'
-}
-
 
 
 def getGainers() -> dict:
-    baseurl = '''https://m.bseindia.com'''
-    res = requests.get(baseurl, headers=headers)
+    baseurl = """https://m.bseindia.com"""
+    res = requests.get(baseurl, headers=COMMON_REQUEST_HEADERS)
     c = res.content
     soup = bs(c, "lxml")
     for tag in soup("div"):
         try:
-            if(tag['id'] == 'divGainers'):
+            if tag["id"] == "divGainers":
                 resSoup = tag
                 break
         except KeyError:
@@ -54,7 +51,7 @@ def getGainers() -> dict:
             "scripCode": str(tr.td.a["href"].split("=")[1]),
             "LTP": str(td[1].string),
             "change": str(td[2].string),
-            "pChange": str(td[3].string)
+            "pChange": str(td[3].string),
         }
         gainers.append(gainer)
     return gainers
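
For context, a minimal usage sketch of the refactored module (assumes the package and its lxml dependency are installed and m.bseindia.com is reachable; note the function is annotated -> dict but actually returns the gainers list built above):

# Hypothetical direct call to the refactored module; not part of this commit.
from bsedata.gainers import getGainers

gainers = getGainers()  # list of dicts scraped from the divGainers table
for scrip in gainers[:3]:
    print(scrip["securityID"], scrip["LTP"], scrip["pChange"])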
3 changes: 3 additions & 0 deletions bsedata/helpers.py
@@ -0,0 +1,3 @@
+COMMON_REQUEST_HEADERS = {
+    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36 Edg/83.0.478.45'
+}
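
The new helpers module centralizes the User-Agent string that gainers.py, indices.py and losers.py previously each defined as a module-level headers dict. A minimal sketch of the pattern the other files in this commit switch to:

# Sketch of the shared-headers pattern introduced by helpers.py.
import requests

from bsedata.helpers import COMMON_REQUEST_HEADERS

res = requests.get("https://m.bseindia.com", headers=COMMON_REQUEST_HEADERS)
res.raise_for_status()  # defensive check; the library code itself does not call this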
69 changes: 35 additions & 34 deletions bsedata/indices.py
@@ -24,12 +24,9 @@
 """
 
-import requests
+from bsedata.helpers import COMMON_REQUEST_HEADERS
 from bs4 import BeautifulSoup as bs
 
-headers = {
-    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36 Edg/83.0.478.45'
-}
+import requests
 
 
 def indices(category: str) -> dict:
@@ -43,12 +40,13 @@ def indices(category: str) -> dict:
         "composite": "7,1",
         "government": "8,1",
         "corporate": "9,1",
-        "money_market": "10,1"
+        "money_market": "10,1",
     }
     try:
         ddl_category = cat[category]
     except KeyError:
-        print('''
+        print(
+            """
 ### Invalid category ###
 Use one of the categories mentioned below:
@@ -62,50 +60,53 @@ def indices(category: str) -> dict:
 government
 corporate
 money_market
-''')
+"""
+        )
         return
-    baseurl = '''https://m.bseindia.com/IndicesView_New.aspx'''
-    res = requests.get(baseurl, headers=headers)
+    baseurl = """https://m.bseindia.com/IndicesView_New.aspx"""
+    res = requests.get(baseurl, headers=COMMON_REQUEST_HEADERS)
     c = res.content
     soup = bs(c, "lxml")
     options = {
-        '__EVENTTARGET': 'ddl_Category',
-        '__VIEWSTATEENCRYPTED': '',
-        '__EVENTARGUMENT': '',
-        '__LASTFOCUS': '',
-        '__VIEWSTATEGENERATOR': '162C96CD',
-        'UcHeaderMenu1$txtGetQuote': '',
-        '__EVENTVALIDATION': '',
-        '__VIEWSTATE': ''
+        "__EVENTTARGET": "ddl_Category",
+        "__VIEWSTATEENCRYPTED": "",
+        "__EVENTARGUMENT": "",
+        "__LASTFOCUS": "",
+        "__VIEWSTATEGENERATOR": "162C96CD",
+        "UcHeaderMenu1$txtGetQuote": "",
+        "__EVENTVALIDATION": "",
+        "__VIEWSTATE": "",
     }
     for input in soup("input"):
         try:
-            if(input['type'] == "hidden"):
-                if(input['id'] == '__VIEWSTATE'):
-                    options['__VIEWSTATE'] = input['value']
-                elif(input['id'] == '__EVENTVALIDATION'):
-                    options['__EVENTVALIDATION'] = input['value']
+            if input["type"] == "hidden":
+                if input["id"] == "__VIEWSTATE":
+                    options["__VIEWSTATE"] = input["value"]
+                elif input["id"] == "__EVENTVALIDATION":
+                    options["__EVENTVALIDATION"] = input["value"]
         except KeyError:
             continue
-    options['ddl_Category'] = ddl_category
-    res = requests.post(url=baseurl, data=options, headers=headers)
+    options["ddl_Category"] = ddl_category
+    res = requests.post(url=baseurl, data=options, headers=COMMON_REQUEST_HEADERS)
     c = res.content
     soup = bs(c, "lxml")
     index_list = []
-    for td in soup('td'):
+    for td in soup("td"):
         try:
-            if(td['class'][0] == 'TTRow_left'):
+            if td["class"][0] == "TTRow_left":
                 index = {}
-                index['currentValue'] = td.next_sibling.string.strip()
-                index['change'] = td.next_sibling.next_sibling.string.strip()
-                index['pChange'] = td.next_sibling.next_sibling.next_sibling.string.strip()
-                index['scripFlag'] = td.a['href'].strip().split('=')[1]
-                index['name'] = td.a.string.strip().replace(';', '')
+                index["currentValue"] = td.next_sibling.string.strip()
+                index["change"] = td.next_sibling.next_sibling.string.strip()
+                index[
+                    "pChange"
+                ] = td.next_sibling.next_sibling.next_sibling.string.strip()
+                index["scripFlag"] = td.a["href"].strip().split("=")[1]
+                index["name"] = td.a.string.strip().replace(";", "")
                 index_list.append(index)
         except KeyError:
             continue
     results = {}
     for span in soup("span", id="inddate"):
-        results['updatedOn'] = span.string[6:].split('|')[0].strip()
-        results['indices'] = index_list
+        results["updatedOn"] = span.string[6:].split("|")[0].strip()
+        results["indices"] = index_list
     return results
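
indices() drives an ASP.NET WebForms postback: it GETs the page once to harvest the server-issued hidden fields (__VIEWSTATE, __EVENTVALIDATION), then POSTs them back with ddl_Category set to the requested index group, replicating what the site's dropdown does in the browser. A condensed sketch of that round trip using the same libraries as the diff (a standalone illustration, not the library's exact code):

# Condensed sketch of the WebForms postback pattern used by indices().
import requests
from bs4 import BeautifulSoup as bs

from bsedata.helpers import COMMON_REQUEST_HEADERS

URL = "https://m.bseindia.com/IndicesView_New.aspx"


def postback(ddl_category: str) -> bytes:
    # 1. GET the page so the server issues fresh hidden-state tokens.
    soup = bs(requests.get(URL, headers=COMMON_REQUEST_HEADERS).content, "lxml")
    form = {"__EVENTTARGET": "ddl_Category", "ddl_Category": ddl_category}
    for field in ("__VIEWSTATE", "__EVENTVALIDATION"):
        tag = soup.find("input", id=field)
        form[field] = tag["value"] if tag is not None else ""
    # 2. POST the tokens back to replay the dropdown's change event.
    return requests.post(URL, data=form, headers=COMMON_REQUEST_HEADERS).content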
13 changes: 5 additions & 8 deletions bsedata/losers.py
@@ -24,22 +24,19 @@
 """
 
+from bsedata.helpers import COMMON_REQUEST_HEADERS
 from bs4 import BeautifulSoup as bs
 import requests
 
-headers = {
-    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36 Edg/83.0.478.45'
-}
-
 
 
 def getLosers() -> dict:
-    baseurl = '''https://m.bseindia.com'''
-    res = requests.get(baseurl, headers=headers)
+    baseurl = """https://m.bseindia.com"""
+    res = requests.get(baseurl, headers=COMMON_REQUEST_HEADERS)
     c = res.content
     soup = bs(c, "lxml")
     for tag in soup("div"):
         try:
-            if(tag['id'] == 'divLosers'):
+            if tag["id"] == "divLosers":
                 resSoup = tag
                 break
         except KeyError:
@@ -54,7 +51,7 @@ def getLosers() -> dict:
             "scripCode": str(tr.td.a["href"].split("=")[1]),
             "LTP": str(td[1].string),
             "change": str(td[2].string),
-            "pChange": str(td[3].string)
+            "pChange": str(td[3].string),
         }
         losers.append(loser)
     return losers
