Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ETFs data fetching #133

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
41 changes: 41 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,44 @@
Fork of https://github.com/vsjha18/nsetools
=======================
Changes
* ETF live data fetching

Installation
* mkdir stock_watch
* cd stock_watch
* git clone https://github.com/ask1234560/nsetools.git
* python3 -m venv environment
* source environment/bin/activate
* pip install -r nsetools/requirements.txt

Example Script
```
import json
from nsetools import Nse

nse = Nse()

indexesQ = ['NIFTY 50', 'NIFTY NEXT 50', 'INDIA VIX']
etfQ = ["NIFTYBEES", "JUNIORBEES"]

outputETFs = []
outputIndexes = []

for q in etfQ:
jd = json.loads(nse.get_etf_quote(q, as_json=True))
items = [q, jd.get("pChange"), jd.get("change"), jd.get("lastPrice")]
outputETFs.append(items)

for q in indexesQ:
jd = json.loads(nse.get_index_quote(q, as_json=True))
items = [q, jd.get("pChange"), jd.get("change"), jd.get("lastPrice")]
outputIndexes.append(items)

print(*outputIndexes, "*"*100, *outputETFs, sep="\n")
```




Project Page
=============
http://nsetools.readthedocs.io
Expand Down
109 changes: 97 additions & 12 deletions nse.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,13 +58,14 @@ def __init__(self):
# URL list
self.get_quote_url = 'https://www1.nseindia.com/live_market/dynaContent/live_watch/get_quote/GetQuote.jsp?'
self.stocks_csv_url = 'http://www1.nseindia.com/content/equities/EQUITY_L.csv'
self.etf_csv_url = 'https://www1.nseindia.com/content/equities/eq_etfseclist.csv'
self.top_gainer_url = 'http://www1.nseindia.com/live_market/dynaContent/live_analysis/gainers/niftyGainers1.json'
self.top_loser_url = 'http://www1.nseindia.com/live_market/dynaContent/live_analysis/losers/niftyLosers1.json'
self.top_fno_gainer_url\
= 'https://www1.nseindia.com/live_market/dynaContent/live_analysis/gainers/fnoGainers1.json'
self.top_fno_loser_url = 'https://www1.nseindia.com/live_market/dynaContent/live_analysis/losers/fnoLosers1.json'
self.advances_declines_url = 'http://www1.nseindia.com/common/json/indicesAdvanceDeclines.json'
self.index_url="http://www1.nseindia.com/homepage/Indices1.json"
self.index_url = "http://www1.nseindia.com/homepage/Indices1.json"
self.bhavcopy_base_url = "https://www1.nseindia.com/content/historical/EQUITIES/%s/%s/cm%s%s%sbhav.csv.zip"
self.bhavcopy_base_filename = "cm%s%s%sbhav.csv"
self.active_equity_monthly_url =\
Expand Down Expand Up @@ -96,7 +97,8 @@ def get_fno_lot_sizes(self, cached=True, as_json=False):
res = byte_adaptor(res)
for line in res.read().split('\n'):
if line != '' and re.search(',', line) and (line.casefold().find('symbol') == -1):
(code, name) = [x.strip() for x in line.split(',')[1:3]]
(code, name) = [x.strip()
for x in line.split(',')[1:3]]
res_dict[code] = int(name)
# else just skip the evaluation, line may not be a valid csv
else:
Expand Down Expand Up @@ -131,6 +133,33 @@ def get_stock_codes(self, cached=True, as_json=False):
self.__CODECACHE__ = res_dict
return self.render_response(self.__CODECACHE__, as_json)

def get_etf_codes(self, cached=True, as_json=False):
    """
    Return a dict mapping ETF symbol -> ETF name, parsed from the NSE
    ETF security list CSV (``self.etf_csv_url``).

    Results are cached on the instance; the server is hit only when the
    cache is empty or the caller passes ``cached=False``.

    :param cached: when True (default) reuse a previously fetched list
    :param as_json: when True return a JSON string instead of a dict
    :return: dict of code -> name (or its JSON rendering)
    :raises: HTTPError, URLError on network failure;
             Exception when the server returns no response
    """
    # BUGFIX: the earlier revision stored this list in self.__CODECACHE__,
    # the same attribute get_stock_codes() uses.  Calling one method then
    # the other returned the wrong instrument list.  A dedicated attribute
    # (read via getattr so an unmodified __init__ still works) fixes that.
    cache = getattr(self, '_etf_code_cache', None)
    if cached is not True or cache is None:
        req = Request(self.etf_csv_url, None, self.headers)
        # raises HTTPError and URLError
        res = self.opener.open(req)
        if res is None:
            raise Exception('no response received')
        # for py3 compat convert byte file-like object to
        # string file-like object
        res = byte_adaptor(res)
        res_dict = {}
        for line in res.read().split('\n'):
            # skip blank lines and lines that are not valid csv;
            # note the header line (if any) is kept, matching the
            # behaviour of get_stock_codes()
            if line != '' and re.search(',', line):
                (code, name) = line.split(',')[0:2]
                # strip stray whitespace so symbol lookups match
                res_dict[code.strip()] = name.strip()
        self._etf_code_cache = res_dict
        cache = res_dict
    return self.render_response(cache, as_json)

def is_valid_code(self, code):
"""
:param code: a string stock code
Expand All @@ -143,6 +172,18 @@ def is_valid_code(self, code):
else:
return False

def is_valid_etf(self, code):
    """
    Check whether *code* is a known ETF symbol (case-insensitive).

    :param code: a string etf code
    :return: Boolean — True when the upper-cased code appears in the
             ETF list, False otherwise (including empty/None input)
    """
    # BUGFIX: the earlier revision fell through and implicitly returned
    # None for falsy input; callers expect a Boolean, mirroring the
    # behaviour of is_valid_code().
    if not code:
        return False
    return code.upper() in self.get_etf_codes()

def get_quote(self, code, as_json=False):
"""
gets the quote for a given stock code
Expand All @@ -163,18 +204,59 @@ def get_quote(self, code, as_json=False):
res = byte_adaptor(res)
res = res.read()
# Now parse the response to get the relevant data
match = re.search(\
r'<div\s+id="responseDiv"\s+style="display:none">(.*?)</div>',
res, re.S
)
match = re.search(
r'<div\s+id="responseDiv"\s+style="display:none">(.*?)</div>',
res, re.S
)
try:
buffer = match.group(1).strip()
# commenting following two lines because now we are not using ast and instead
# relying on json's ability to do parsing. Should be much faster and more
# reliable.
# reliable.
#buffer = js_adaptor(buffer)
#response = self.clean_server_response(ast.literal_eval(buffer)['data'][0])
response = self.clean_server_response(json.loads(buffer)['data'][0])
response = self.clean_server_response(
json.loads(buffer)['data'][0])
except SyntaxError as err:
raise Exception('ill formatted response')
else:
return self.render_response(response, as_json)
else:
return None

def get_etf_quote(self, code, as_json=False):
"""
gets the quote for a given etf code
:param code:
:return: dict or None
:raises: HTTPError, URLError
"""
code = code.upper()
if self.is_valid_etf(code):
url = self.build_url_for_quote(code)
req = Request(url, None, self.headers)
# this can raise HTTPError and URLError, but we are not handling it
# north bound APIs should use it for exception handling
res = self.opener.open(req)

# for py3 compat covert byte file like object to
# string file like object
res = byte_adaptor(res)
res = res.read()
# Now parse the response to get the relevant data
match = re.search(
r'<div\s+id="responseDiv"\s+style="display:none">(.*?)</div>',
res, re.S
)
try:
buffer = match.group(1).strip()
# commenting following two lines because now we are not using ast and instead
# relying on json's ability to do parsing. Should be much faster and more
# reliable.
#buffer = js_adaptor(buffer)
#response = self.clean_server_response(ast.literal_eval(buffer)['data'][0])
response = self.clean_server_response(
json.loads(buffer)['data'][0])
except SyntaxError as err:
raise Exception('ill formatted response')
else:
Expand All @@ -195,7 +277,8 @@ def get_top_gainers(self, as_json=False):
res = byte_adaptor(res)
res_dict = json.load(res)
# clean the output and make appropriate type conversions
res_list = [self.clean_server_response(item) for item in res_dict['data']]
res_list = [self.clean_server_response(
item) for item in res_dict['data']]
return self.render_response(res_list, as_json)

def get_top_losers(self, as_json=False):
Expand Down Expand Up @@ -228,7 +311,8 @@ def get_top_fno_gainers(self, as_json=False):
res = byte_adaptor(res)
res_dict = json.load(res)
# clean the output and make appropriate type conversions
res_list = [self.clean_server_response(item) for item in res_dict['data']]
res_list = [self.clean_server_response(
item) for item in res_dict['data']]
return self.render_response(res_list, as_json)

def get_top_fno_losers(self, as_json=False):
Expand Down Expand Up @@ -289,7 +373,7 @@ def get_year_high(self, as_json=False):

def get_year_low(self, as_json=False):
return self._get_json_response_from_url(self.year_low_url, as_json)

def get_preopen_nifty(self, as_json=False):
return self._get_json_response_from_url(self.preopen_nifty_url, as_json)

Expand Down Expand Up @@ -375,7 +459,8 @@ def build_url_for_quote(self, code):
:return: a url object
"""
if code is not None and type(code) is str:
encoded_args = urlencode([('symbol', code), ('illiquid', '0'), ('smeFlag', '0'), ('itpFlag', '0')])
encoded_args = urlencode(
[('symbol', code), ('illiquid', '0'), ('smeFlag', '0'), ('itpFlag', '0')])
return self.get_quote_url + encoded_args
else:
raise Exception('code must be string')
Expand Down
4 changes: 2 additions & 2 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
sphinxcontrib-googleanalytics==0.1
sphinxcontrib-newsfeed==0.1.4
python-dateutil==2.8.2
six==1.16.0