#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# yfinance - market data downloader
# https://github.com/ranaroussi/yfinance
#
# Copyright 2017-2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import datetime as _datetime
import json as _json
import logging
import time as _time
import warnings
from typing import Optional
from urllib.parse import quote as urlencode
import dateutil as _dateutil
import dateutil.relativedelta  # ensure _dateutil.relativedelta is loaded
import numpy as np
import pandas as pd
import requests
from . import shared
from . import utils
from .data import TickerData
from .scrapers.analysis import Analysis
from .scrapers.fundamentals import Fundamentals
from .scrapers.holders import Holders
from .scrapers.quote import Quote, FastInfo
from .const import _BASE_URL_, _ROOT_URL_
class TickerBase:
def __init__(self, ticker, session=None):
self.ticker = ticker.upper()
self.session = session
self._history = None
self._history_metadata = None
self._history_metadata_formatted = False
self._base_url = _BASE_URL_
self._tz = None
self._isin = None
self._news = []
self._shares = None
self._earnings_dates = {}
self._earnings = None
self._financials = None
# accept isin as ticker
if utils.is_isin(self.ticker):
self.ticker = utils.get_ticker_by_isin(self.ticker, None, session)
self._data: TickerData = TickerData(self.ticker, session=session)
self._analysis = Analysis(self._data)
self._holders = Holders(self._data)
self._quote = Quote(self._data)
self._fundamentals = Fundamentals(self._data)
self._fast_info = None
# Limit recursion depth when repairing prices
self._reconstruct_start_interval = None
@utils.log_indent_decorator
def history(self, period="1mo", interval="1d",
start=None, end=None, prepost=False, actions=True,
auto_adjust=True, back_adjust=False, repair=False, keepna=False,
proxy=None, rounding=False, timeout=10,
debug=None, # deprecated
raise_errors=False) -> pd.DataFrame:
"""
:Parameters:
period : str
Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
                Either use period parameter or use start and end
interval : str
Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
                Intraday data cannot extend past the last 60 days
start: str
Download start date string (YYYY-MM-DD) or _datetime, inclusive.
Default is 99 years ago
E.g. for start="2020-01-01", the first data point will be on "2020-01-01"
end: str
Download end date string (YYYY-MM-DD) or _datetime, exclusive.
Default is now
E.g. for end="2023-01-01", the last data point will be on "2022-12-31"
prepost : bool
Include Pre and Post market data in results?
Default is False
auto_adjust: bool
Adjust all OHLC automatically? Default is True
back_adjust: bool
                Back-adjust data to mimic true historical prices
repair: bool
Detect currency unit 100x mixups and attempt repair.
Default is False
keepna: bool
Keep NaN rows returned by Yahoo?
Default is False
proxy: str
Optional. Proxy server URL scheme. Default is None
rounding: bool
Round values to 2 decimal places?
Optional. Default is False = precision suggested by Yahoo!
timeout: None or float
If not None stops waiting for a response after given number of
seconds. (Can also be a fraction of a second e.g. 0.01)
Default is 10 seconds.
debug: bool
If passed as False, will suppress message printing to console.
DEPRECATED, will be removed in future version
raise_errors: bool
If True, then raise errors as Exceptions instead of logging.
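
        Example (illustrative):
            >>> import yfinance as yf
            >>> df = yf.Ticker("MSFT").history(period="6mo", interval="1d")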
"""
logger = utils.get_yf_logger()
if debug is not None:
if debug:
utils.print_once(f"yfinance: Ticker.history(debug={debug}) argument is deprecated and will be removed in future version. Do this instead: logging.getLogger('yfinance').setLevel(logging.ERROR)")
logger.setLevel(logging.ERROR)
else:
utils.print_once(f"yfinance: Ticker.history(debug={debug}) argument is deprecated and will be removed in future version. Do this instead to suppress error messages: logging.getLogger('yfinance').setLevel(logging.CRITICAL)")
logger.setLevel(logging.CRITICAL)
start_user = start
end_user = end
if start or period is None or period.lower() == "max":
# Check can get TZ. Fail => probably delisted
tz = self._get_ticker_tz(proxy, timeout)
if tz is None:
# Every valid ticker has a timezone. Missing = problem
err_msg = "No timezone found, symbol may be delisted"
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if raise_errors:
raise Exception(f'{self.ticker}: {err_msg}')
else:
logger.error(f'{self.ticker}: {err_msg}')
return utils.empty_df()
if end is None:
end = int(_time.time())
else:
end = utils._parse_user_dt(end, tz)
if start is None:
if interval == "1m":
start = end - 604800 # Subtract 7 days
else:
max_start_datetime = pd.Timestamp.utcnow().floor("D") - _datetime.timedelta(days=99 * 365)
start = int(max_start_datetime.timestamp())
else:
start = utils._parse_user_dt(start, tz)
params = {"period1": start, "period2": end}
else:
period = period.lower()
params = {"range": period}
params["interval"] = interval.lower()
params["includePrePost"] = prepost
        # 1) fix weird bug with Yahoo! - returning 60m for 30m bars
if params["interval"] == "30m":
params["interval"] = "15m"
# if the ticker is MUTUALFUND or ETF, then get capitalGains events
params["events"] = "div,splits,capitalGains"
params_pretty = dict(params)
tz = self._get_ticker_tz(proxy, timeout)
for k in ["period1", "period2"]:
if k in params_pretty:
params_pretty[k] = str(pd.Timestamp(params[k], unit='s').tz_localize("UTC").tz_convert(tz))
logger.debug(f'{self.ticker}: Yahoo GET parameters: {str(params_pretty)}')
# Getting data from json
url = f"{self._base_url}/v8/finance/chart/{self.ticker}"
data = None
get_fn = self._data.get
if end is not None:
end_dt = pd.Timestamp(end, unit='s').tz_localize("UTC")
dt_now = pd.Timestamp.utcnow()
data_delay = _datetime.timedelta(minutes=30)
if end_dt + data_delay <= dt_now:
# Date range in past so safe to fetch through cache:
get_fn = self._data.cache_get
try:
data = get_fn(
url=url,
params=params,
proxy=proxy,
timeout=timeout
)
if "Will be right back" in data.text or data is None:
raise RuntimeError("*** YAHOO! FINANCE IS CURRENTLY DOWN! ***\n"
"Our engineers are working quickly to resolve "
"the issue. Thank you for your patience.")
data = data.json()
except Exception:
pass
# Store the meta data that gets retrieved simultaneously
try:
self._history_metadata = data["chart"]["result"][0]["meta"]
except Exception:
self._history_metadata = {}
intraday = params["interval"][-1] in ("m", 'h')
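        # e.g. "1m"/"30m"/"1h" end in 'm'/'h' => intraday; "1d"/"1wk"/"3mo" do not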
err_msg = "No price data found, symbol may be delisted"
if start or period is None or period.lower() == "max":
err_msg += f' ({params["interval"]} '
if start_user is not None:
err_msg += f'{start_user}'
elif not intraday:
err_msg += f'{pd.Timestamp(start, unit="s").tz_localize("UTC").tz_convert(tz).date()}'
else:
err_msg += f'{pd.Timestamp(start, unit="s").tz_localize("UTC").tz_convert(tz)}'
err_msg += ' -> '
if end_user is not None:
err_msg += f'{end_user})'
elif not intraday:
err_msg += f'{pd.Timestamp(end, unit="s").tz_localize("UTC").tz_convert(tz).date()})'
else:
err_msg += f'{pd.Timestamp(end, unit="s").tz_localize("UTC").tz_convert(tz)})'
else:
err_msg += f' (period={period})'
fail = False
if data is None or type(data) is not dict:
fail = True
elif type(data) is dict and 'status_code' in data:
err_msg += f"(Yahoo status_code = {data['status_code']})"
fail = True
elif "chart" in data and data["chart"]["error"]:
err_msg = data["chart"]["error"]["description"]
fail = True
elif "chart" not in data or data["chart"]["result"] is None or not data["chart"]["result"]:
fail = True
        elif period is not None and "timestamp" not in data["chart"]["result"][0] and period not in self._history_metadata["validRanges"]:
# User provided a bad period. The minimum should be '1d', but sometimes Yahoo accepts '1h'.
err_msg = f"Period '{period}' is invalid, must be one of {self._history_metadata['validRanges']}"
fail = True
if fail:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if raise_errors:
raise Exception(f'{self.ticker}: {err_msg}')
else:
logger.error(f'{self.ticker}: {err_msg}')
if self._reconstruct_start_interval is not None and self._reconstruct_start_interval == interval:
self._reconstruct_start_interval = None
return utils.empty_df()
# parse quotes
try:
quotes = utils.parse_quotes(data["chart"]["result"][0])
# Yahoo bug fix - it often appends latest price even if after end date
if end and not quotes.empty:
endDt = pd.to_datetime(_datetime.datetime.utcfromtimestamp(end))
if quotes.index[quotes.shape[0] - 1] >= endDt:
quotes = quotes.iloc[0:quotes.shape[0] - 1]
except Exception:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if raise_errors:
raise Exception(f'{self.ticker}: {err_msg}')
else:
logger.error(f'{self.ticker}: {err_msg}')
if self._reconstruct_start_interval is not None and self._reconstruct_start_interval == interval:
self._reconstruct_start_interval = None
return shared._DFS[self.ticker]
logger.debug(f'{self.ticker}: yfinance received OHLC data: {quotes.index[0]} -> {quotes.index[-1]}')
        # 2) fix weird bug with Yahoo! - returning 60m for 30m bars
if interval.lower() == "30m":
logger.debug(f'{self.ticker}: resampling 30m OHLC from 15m')
quotes2 = quotes.resample('30T')
quotes = pd.DataFrame(index=quotes2.last().index, data={
'Open': quotes2['Open'].first(),
'High': quotes2['High'].max(),
'Low': quotes2['Low'].min(),
'Close': quotes2['Close'].last(),
'Adj Close': quotes2['Adj Close'].last(),
'Volume': quotes2['Volume'].sum()
})
try:
quotes['Dividends'] = quotes2['Dividends'].max()
quotes['Stock Splits'] = quotes2['Stock Splits'].max()
except Exception:
pass
# Select useful info from metadata
quote_type = self._history_metadata["instrumentType"]
expect_capital_gains = quote_type in ('MUTUALFUND', 'ETF')
tz_exchange = self._history_metadata["exchangeTimezoneName"]
# Note: ordering is important. If you change order, run the tests!
quotes = utils.set_df_tz(quotes, params["interval"], tz_exchange)
quotes = utils.fix_Yahoo_dst_issue(quotes, params["interval"])
quotes = utils.fix_Yahoo_returning_live_separate(quotes, params["interval"], tz_exchange)
intraday = params["interval"][-1] in ("m", 'h')
if not prepost and intraday and "tradingPeriods" in self._history_metadata:
tps = self._history_metadata["tradingPeriods"]
if not isinstance(tps, pd.DataFrame):
self._history_metadata = utils.format_history_metadata(self._history_metadata, tradingPeriodsOnly=True)
tps = self._history_metadata["tradingPeriods"]
quotes = utils.fix_Yahoo_returning_prepost_unrequested(quotes, params["interval"], tps)
logger.debug(f'{self.ticker}: OHLC after cleaning: {quotes.index[0]} -> {quotes.index[-1]}')
# actions
dividends, splits, capital_gains = utils.parse_actions(data["chart"]["result"][0])
if not expect_capital_gains:
capital_gains = None
if splits is not None:
splits = utils.set_df_tz(splits, interval, tz_exchange)
if dividends is not None:
dividends = utils.set_df_tz(dividends, interval, tz_exchange)
if capital_gains is not None:
capital_gains = utils.set_df_tz(capital_gains, interval, tz_exchange)
if start is not None:
if not quotes.empty:
startDt = quotes.index[0].floor('D')
if dividends is not None:
dividends = dividends.loc[startDt:]
if capital_gains is not None:
capital_gains = capital_gains.loc[startDt:]
if splits is not None:
splits = splits.loc[startDt:]
if end is not None:
endDt = pd.Timestamp(end, unit='s').tz_localize(tz)
if dividends is not None:
dividends = dividends[dividends.index < endDt]
if capital_gains is not None:
capital_gains = capital_gains[capital_gains.index < endDt]
if splits is not None:
splits = splits[splits.index < endDt]
# Prepare for combine
intraday = params["interval"][-1] in ("m", 'h')
if not intraday:
# If localizing a midnight during DST transition hour when clocks roll back,
# meaning clock hits midnight twice, then use the 2nd (ambiguous=True)
quotes.index = pd.to_datetime(quotes.index.date).tz_localize(tz_exchange, ambiguous=True, nonexistent='shift_forward')
if dividends.shape[0] > 0:
dividends.index = pd.to_datetime(dividends.index.date).tz_localize(tz_exchange, ambiguous=True, nonexistent='shift_forward')
if splits.shape[0] > 0:
splits.index = pd.to_datetime(splits.index.date).tz_localize(tz_exchange, ambiguous=True, nonexistent='shift_forward')
# Combine
df = quotes.sort_index()
if dividends.shape[0] > 0:
df = utils.safe_merge_dfs(df, dividends, interval)
if "Dividends" in df.columns:
df.loc[df["Dividends"].isna(), "Dividends"] = 0
else:
df["Dividends"] = 0.0
if splits.shape[0] > 0:
df = utils.safe_merge_dfs(df, splits, interval)
if "Stock Splits" in df.columns:
df.loc[df["Stock Splits"].isna(), "Stock Splits"] = 0
else:
df["Stock Splits"] = 0.0
if expect_capital_gains:
if capital_gains.shape[0] > 0:
df = utils.safe_merge_dfs(df, capital_gains, interval)
if "Capital Gains" in df.columns:
df.loc[df["Capital Gains"].isna(), "Capital Gains"] = 0
else:
df["Capital Gains"] = 0.0
        logger.debug(f'{self.ticker}: OHLC after combining events: {df.index[0]} -> {df.index[-1]}')
df = df[~df.index.duplicated(keep='first')] # must do before repair
        if isinstance(repair, str) and repair == 'silent':
            utils.log_once(logging.WARNING, "yfinance: Ticker.history(repair='silent') value is deprecated and will be removed in future version. Repair now silent by default, use logging module to increase verbosity.")
repair = True
if repair:
# Do this before auto/back adjust
logger.debug(f'{self.ticker}: checking OHLC for repairs ...')
df = self._fix_unit_mixups(df, interval, tz_exchange, prepost)
df = self._fix_bad_stock_split(df, interval, tz_exchange)
# Must repair 100x and split errors before price reconstruction
df = self._fix_zeroes(df, interval, tz_exchange, prepost)
df = self._fix_missing_div_adjust(df, interval, tz_exchange)
df = df.sort_index()
# Auto/back adjust
try:
if auto_adjust:
df = utils.auto_adjust(df)
elif back_adjust:
df = utils.back_adjust(df)
except Exception as e:
if auto_adjust:
err_msg = "auto_adjust failed with %s" % e
else:
err_msg = "back_adjust failed with %s" % e
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if raise_errors:
raise Exception('%s: %s' % (self.ticker, err_msg))
else:
logger.error('%s: %s' % (self.ticker, err_msg))
if rounding:
df = np.round(df, data["chart"]["result"][0]["meta"]["priceHint"])
df['Volume'] = df['Volume'].fillna(0).astype(np.int64)
if intraday:
df.index.name = "Datetime"
else:
df.index.name = "Date"
self._history = df.copy()
# missing rows cleanup
if not actions:
df = df.drop(columns=["Dividends", "Stock Splits", "Capital Gains"], errors='ignore')
if not keepna:
mask_nan_or_zero = (df.isna() | (df == 0)).all(axis=1)
df = df.drop(mask_nan_or_zero.index[mask_nan_or_zero])
logger.debug(f'{self.ticker}: yfinance returning OHLC: {df.index[0]} -> {df.index[-1]}')
if self._reconstruct_start_interval is not None and self._reconstruct_start_interval == interval:
self._reconstruct_start_interval = None
return df
# ------------------------
@utils.log_indent_decorator
def _reconstruct_intervals_batch(self, df, interval, prepost, tag=-1):
# Reconstruct values in df using finer-grained price data. Delimiter marks what to reconstruct
logger = utils.get_yf_logger()
if not isinstance(df, pd.DataFrame):
raise Exception("'df' must be a Pandas DataFrame not", type(df))
if interval == "1m":
# Can't go smaller than 1m so can't reconstruct
return df
if interval[1:] in ['d', 'wk', 'mo']:
# Interday data always includes pre & post
prepost = True
intraday = False
else:
intraday = True
price_cols = [c for c in ["Open", "High", "Low", "Close", "Adj Close"] if c in df]
data_cols = price_cols + ["Volume"]
# If interval is weekly then can construct with daily. But if smaller intervals then
# restricted to recent times:
intervals = ["1wk", "1d", "1h", "30m", "15m", "5m", "2m", "1m"]
        itds = {i: utils._interval_to_timedelta(i) for i in intervals}
nexts = {intervals[i]: intervals[i + 1] for i in range(len(intervals) - 1)}
min_lookbacks = {"1wk": None, "1d": None, "1h": _datetime.timedelta(days=730)}
for i in ["30m", "15m", "5m", "2m"]:
min_lookbacks[i] = _datetime.timedelta(days=60)
min_lookbacks["1m"] = _datetime.timedelta(days=30)
if interval in nexts:
sub_interval = nexts[interval]
td_range = itds[interval]
else:
logger.warning(f"Have not implemented price repair for '{interval}' interval. Contact developers")
if "Repaired?" not in df.columns:
df["Repaired?"] = False
return df
# Limit max reconstruction depth to 2:
if self._reconstruct_start_interval is None:
self._reconstruct_start_interval = interval
if interval != self._reconstruct_start_interval and interval != nexts[self._reconstruct_start_interval]:
logger.debug(f"{self.ticker}: Price repair has hit max depth of 2 ('%s'->'%s'->'%s')", self._reconstruct_start_interval, nexts[self._reconstruct_start_interval], interval)
return df
df = df.sort_index()
f_repair = df[data_cols].to_numpy() == tag
f_repair_rows = f_repair.any(axis=1)
# Ignore old intervals for which Yahoo won't return finer data:
m = min_lookbacks[sub_interval]
if m is None:
min_dt = None
else:
m -= _datetime.timedelta(days=1) # allow space for 1-day padding
min_dt = pd.Timestamp.utcnow() - m
min_dt = min_dt.tz_convert(df.index.tz).ceil("D")
logger.debug(f"min_dt={min_dt} interval={interval} sub_interval={sub_interval}")
if min_dt is not None:
f_recent = df.index >= min_dt
f_repair_rows = f_repair_rows & f_recent
if not f_repair_rows.any():
logger.info("Data too old to repair")
if "Repaired?" not in df.columns:
df["Repaired?"] = False
return df
dts_to_repair = df.index[f_repair_rows]
indices_to_repair = np.where(f_repair_rows)[0]
if len(dts_to_repair) == 0:
logger.info("Nothing needs repairing (dts_to_repair[] empty)")
if "Repaired?" not in df.columns:
df["Repaired?"] = False
return df
df_v2 = df.copy()
if "Repaired?" not in df_v2.columns:
df_v2["Repaired?"] = False
f_good = ~(df[price_cols].isna().any(axis=1))
f_good = f_good & (df[price_cols].to_numpy() != tag).all(axis=1)
df_good = df[f_good]
# Group nearby NaN-intervals together to reduce number of Yahoo fetches
dts_groups = [[dts_to_repair[0]]]
last_dt = dts_to_repair[0]
last_ind = indices_to_repair[0]
td = utils._interval_to_timedelta(interval)
# Note on setting max size: have to allow space for adding good data
if sub_interval == "1mo":
grp_max_size = _dateutil.relativedelta.relativedelta(years=2)
elif sub_interval == "1wk":
grp_max_size = _dateutil.relativedelta.relativedelta(years=2)
elif sub_interval == "1d":
grp_max_size = _dateutil.relativedelta.relativedelta(years=2)
elif sub_interval == "1h":
grp_max_size = _dateutil.relativedelta.relativedelta(years=1)
elif sub_interval == "1m":
grp_max_size = _datetime.timedelta(days=5) # allow 2 days for buffer below
else:
grp_max_size = _datetime.timedelta(days=30)
logger.debug(f"grp_max_size = {grp_max_size}")
for i in range(1, len(dts_to_repair)):
ind = indices_to_repair[i]
dt = dts_to_repair[i]
if dt.date() < dts_groups[-1][0].date() + grp_max_size:
dts_groups[-1].append(dt)
else:
dts_groups.append([dt])
last_dt = dt
last_ind = ind
logger.debug("Repair groups:")
for g in dts_groups:
logger.debug(f"- {g[0]} -> {g[-1]}")
# Add some good data to each group, so can calibrate prices later:
for i in range(len(dts_groups)):
g = dts_groups[i]
g0 = g[0]
i0 = df_good.index.get_indexer([g0], method="nearest")[0]
if i0 > 0:
if (min_dt is None or df_good.index[i0 - 1] >= min_dt) and \
((not intraday) or df_good.index[i0 - 1].date() == g0.date()):
i0 -= 1
gl = g[-1]
il = df_good.index.get_indexer([gl], method="nearest")[0]
if il < len(df_good) - 1:
if (not intraday) or df_good.index[il + 1].date() == gl.date():
il += 1
good_dts = df_good.index[i0:il + 1]
dts_groups[i] += good_dts.to_list()
dts_groups[i].sort()
n_fixed = 0
for g in dts_groups:
df_block = df[df.index.isin(g)]
logger.debug("df_block:\n" + str(df_block))
start_dt = g[0]
start_d = start_dt.date()
reject = False
if sub_interval == "1h" and (_datetime.date.today() - start_d) > _datetime.timedelta(days=729):
reject = True
elif sub_interval in ["30m", "15m"] and (_datetime.date.today() - start_d) > _datetime.timedelta(days=59):
reject = True
if reject:
# Don't bother requesting more price data, Yahoo will reject
msg = f"Cannot reconstruct {interval} block starting"
if intraday:
msg += f" {start_dt}"
else:
msg += f" {start_d}"
msg += ", too old, Yahoo will reject request for finer-grain data"
logger.info(msg)
continue
td_1d = _datetime.timedelta(days=1)
end_dt = g[-1]
end_d = end_dt.date() + td_1d
if interval in "1wk":
fetch_start = start_d - td_range # need previous week too
fetch_end = g[-1].date() + td_range
elif interval == "1d":
fetch_start = start_d
fetch_end = g[-1].date() + td_range
else:
fetch_start = g[0]
fetch_end = g[-1] + td_range
# The first and last day returned by Yahoo can be slightly wrong, so add buffer:
fetch_start -= td_1d
fetch_end += td_1d
if intraday:
fetch_start = fetch_start.date()
fetch_end = fetch_end.date() + td_1d
if min_dt is not None:
fetch_start = max(min_dt.date(), fetch_start)
logger.debug(f"Fetching {sub_interval} prepost={prepost} {fetch_start}->{fetch_end}")
df_fine = self.history(start=fetch_start, end=fetch_end, interval=sub_interval, auto_adjust=False, actions=True, prepost=prepost, repair=True, keepna=True)
if df_fine is None or df_fine.empty:
msg = f"Cannot reconstruct {interval} block starting"
if intraday:
msg += f" {start_dt}"
else:
msg += f" {start_d}"
msg += ", too old, Yahoo is rejecting request for finer-grain data"
logger.debug(msg)
continue
# Discard the buffer
df_fine = df_fine.loc[g[0]: g[-1] + itds[sub_interval] - _datetime.timedelta(milliseconds=1)].copy()
if df_fine.empty:
msg = f"Cannot reconstruct {interval} block range"
if intraday:
msg += f" {start_dt}->{end_dt}"
else:
msg += f" {start_d}->{end_d}"
msg += ", Yahoo not returning finer-grain data within range"
logger.debug(msg)
continue
df_fine["ctr"] = 0
if interval == "1wk":
weekdays = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]
week_end_day = weekdays[(df_block.index[0].weekday() + 7 - 1) % 7]
df_fine["Week Start"] = df_fine.index.tz_localize(None).to_period("W-" + week_end_day).start_time
grp_col = "Week Start"
elif interval == "1d":
df_fine["Day Start"] = pd.to_datetime(df_fine.index.date)
grp_col = "Day Start"
else:
df_fine.loc[df_fine.index.isin(df_block.index), "ctr"] = 1
df_fine["intervalID"] = df_fine["ctr"].cumsum()
df_fine = df_fine.drop("ctr", axis=1)
grp_col = "intervalID"
df_fine = df_fine[~df_fine[price_cols + ['Dividends']].isna().all(axis=1)]
df_fine_grp = df_fine.groupby(grp_col)
df_new = df_fine_grp.agg(
Open=("Open", "first"),
Close=("Close", "last"),
AdjClose=("Adj Close", "last"),
Low=("Low", "min"),
High=("High", "max"),
Dividends=("Dividends", "sum"),
Volume=("Volume", "sum")).rename(columns={"AdjClose": "Adj Close"})
if grp_col in ["Week Start", "Day Start"]:
df_new.index = df_new.index.tz_localize(df_fine.index.tz)
else:
df_fine["diff"] = df_fine["intervalID"].diff()
new_index = np.append([df_fine.index[0]], df_fine.index[df_fine["intervalID"].diff() > 0])
df_new.index = new_index
logger.debug('df_new:' + '\n' + str(df_new))
# Calibrate!
common_index = np.intersect1d(df_block.index, df_new.index)
if len(common_index) == 0:
# Can't calibrate so don't attempt repair
logger.info(f"Can't calibrate {interval} block starting {start_d} so aborting repair")
continue
# First, attempt to calibrate the 'Adj Close' column. OK if cannot.
# Only necessary for 1d interval, because the 1h data is not div-adjusted.
if interval == '1d':
df_new_calib = df_new[df_new.index.isin(common_index)]
df_block_calib = df_block[df_block.index.isin(common_index)]
f_tag = df_block_calib['Adj Close'] == tag
if f_tag.any():
div_adjusts = df_block_calib['Adj Close'] / df_block_calib['Close']
                    # The loop below assumes each 1d repair is isolated, i.e. surrounded by
                    # good data, which is the case most of the time.
                    # But in case we are repairing a chunk of bad 1d data, back/forward-fill
                    # the good div-adjustments - not perfect, but a good backup.
div_adjusts[f_tag] = np.nan
div_adjusts = div_adjusts.fillna(method='bfill').fillna(method='ffill')
for idx in np.where(f_tag)[0]:
dt = df_new_calib.index[idx]
n = len(div_adjusts)
if df_new.loc[dt, "Dividends"] != 0:
if idx < n - 1:
# Easy, take div-adjustment from next-day
div_adjusts[idx] = div_adjusts[idx + 1]
else:
                                # Take previous-day div-adjustment and reverse today's adjustment
                                div_adj = 1.0 - df_new_calib["Dividends"].iloc[idx] / df_new_calib['Close'].iloc[idx - 1]
div_adjusts[idx] = div_adjusts[idx - 1] / div_adj
else:
if idx > 0:
# Easy, take div-adjustment from previous-day
div_adjusts[idx] = div_adjusts[idx - 1]
else:
# Must take next-day div-adjustment
div_adjusts[idx] = div_adjusts[idx + 1]
if df_new_calib["Dividends"].iloc[idx + 1] != 0:
                                    div_adjusts[idx] *= 1.0 - df_new_calib["Dividends"].iloc[idx + 1] / df_new_calib['Close'].iloc[idx]
f_close_bad = df_block_calib['Close'] == tag
df_new['Adj Close'] = df_block['Close'] * div_adjusts
if f_close_bad.any():
df_new.loc[f_close_bad, 'Adj Close'] = df_new['Close'][f_close_bad] * div_adjusts[f_close_bad]
# Check whether 'df_fine' has different split-adjustment.
# If different, then adjust to match 'df'
calib_cols = ['Open', 'Close']
df_new_calib = df_new[df_new.index.isin(common_index)][calib_cols].to_numpy()
df_block_calib = df_block[df_block.index.isin(common_index)][calib_cols].to_numpy()
calib_filter = (df_block_calib != tag)
if not calib_filter.any():
# Can't calibrate so don't attempt repair
logger.info(f"Can't calibrate {interval} block starting {start_d} so aborting repair")
continue
# Avoid divide-by-zero warnings:
for j in range(len(calib_cols)):
f = ~calib_filter[:, j]
if f.any():
df_block_calib[f, j] = 1
df_new_calib[f, j] = 1
ratios = df_block_calib[calib_filter] / df_new_calib[calib_filter]
weights = df_fine_grp.size()
weights.index = df_new.index
weights = weights[weights.index.isin(common_index)].to_numpy().astype(float)
            weights = weights[:, None]  # add axis: 1D -> 2D column vector
weights = np.tile(weights, len(calib_cols)) # 1D -> 2D
weights = weights[calib_filter] # flatten
not1 = ~np.isclose(ratios, 1.0, rtol=0.00001)
if np.sum(not1) == len(calib_cols):
# Only 1 calibration row in df_new is different to df_block so ignore
ratio = 1.0
else:
ratio = np.average(ratios, weights=weights)
logger.debug(f"Price calibration ratio (raw) = {ratio:6f}")
ratio_rcp = round(1.0 / ratio, 1)
ratio = round(ratio, 1)
if ratio == 1 and ratio_rcp == 1:
# Good!
pass
else:
if ratio > 1:
# data has different split-adjustment than fine-grained data
# Adjust fine-grained to match
df_new[price_cols] *= ratio
df_new["Volume"] /= ratio
elif ratio_rcp > 1:
# data has different split-adjustment than fine-grained data
# Adjust fine-grained to match
df_new[price_cols] *= 1.0 / ratio_rcp
df_new["Volume"] *= ratio_rcp
# Repair!
bad_dts = df_block.index[(df_block[price_cols + ["Volume"]] == tag).to_numpy().any(axis=1)]
no_fine_data_dts = []
for idx in bad_dts:
if idx not in df_new.index:
# Yahoo didn't return finer-grain data for this interval,
# so probably no trading happened.
no_fine_data_dts.append(idx)
if len(no_fine_data_dts) > 0:
logger.debug(f"Yahoo didn't return finer-grain data for these intervals: " + str(no_fine_data_dts))
for idx in bad_dts:
if idx not in df_new.index:
# Yahoo didn't return finer-grain data for this interval,
# so probably no trading happened.
continue
df_new_row = df_new.loc[idx]
if interval == "1wk":
df_last_week = df_new.iloc[df_new.index.get_loc(idx) - 1]
df_fine = df_fine.loc[idx:]
df_bad_row = df.loc[idx]
bad_fields = df_bad_row.index[df_bad_row == tag].to_numpy()
if "High" in bad_fields:
df_v2.loc[idx, "High"] = df_new_row["High"]
if "Low" in bad_fields:
df_v2.loc[idx, "Low"] = df_new_row["Low"]
if "Open" in bad_fields:
if interval == "1wk" and idx != df_fine.index[0]:
# Exchange closed Monday. In this case, Yahoo sets Open to last week close
df_v2.loc[idx, "Open"] = df_last_week["Close"]
df_v2.loc[idx, "Low"] = min(df_v2.loc[idx, "Open"], df_v2.loc[idx, "Low"])
else:
df_v2.loc[idx, "Open"] = df_new_row["Open"]
if "Close" in bad_fields:
df_v2.loc[idx, "Close"] = df_new_row["Close"]
# Assume 'Adj Close' also corrupted, easier than detecting whether true
df_v2.loc[idx, "Adj Close"] = df_new_row["Adj Close"]
elif "Adj Close" in bad_fields:
df_v2.loc[idx, "Adj Close"] = df_new_row["Adj Close"]
if "Volume" in bad_fields:
df_v2.loc[idx, "Volume"] = df_new_row["Volume"]
df_v2.loc[idx, "Repaired?"] = True
n_fixed += 1
return df_v2
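
    # Illustrative sketch of how callers drive the repair above: bad cells are
    # overwritten with a sentinel tag, then reconstruction is requested
    # ('bad_rows' below is a hypothetical mask, not a name from this file):
    #   tag = -1.0
    #   df.loc[bad_rows, price_cols] = tag       # mark cells needing repair
    #   df = self._reconstruct_intervals_batch(df, interval, prepost, tag)
    #   # cells still equal to tag could not be repaired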
@utils.log_indent_decorator
def _fix_unit_mixups(self, df, interval, tz_exchange, prepost):
if df.empty:
return df
df2 = self._fix_unit_switch(df, interval, tz_exchange)
df3 = self._fix_unit_random_mixups(df2, interval, tz_exchange, prepost)
return df3
@utils.log_indent_decorator
def _fix_unit_random_mixups(self, df, interval, tz_exchange, prepost):
        # Sometimes Yahoo returns a few prices in cents/pence instead of $/£
# I.e. 100x bigger
# 2 ways this manifests:
# - random 100x errors spread throughout table
# - a sudden switch between $<->cents at some date
# This function fixes the first.
if df.empty:
return df
# Easy to detect and fix, just look for outliers = ~100x local median
logger = utils.get_yf_logger()
if df.shape[0] == 0:
if "Repaired?" not in df.columns:
df["Repaired?"] = False
return df
if df.shape[0] == 1:
# Need multiple rows to confidently identify outliers
logger.info("price-repair-100x: Cannot check single-row table for 100x price errors")
if "Repaired?" not in df.columns:
df["Repaired?"] = False
return df
df2 = df.copy()
if df2.index.tz is None:
df2.index = df2.index.tz_localize(tz_exchange)
elif df2.index.tz != tz_exchange:
df2.index = df2.index.tz_convert(tz_exchange)
        # Only import scipy if user actually wants this repair function,
        # to avoid adding it to the package dependencies.
from scipy import ndimage as _ndimage
data_cols = ["High", "Open", "Low", "Close", "Adj Close"] # Order important, separate High from Low
data_cols = [c for c in data_cols if c in df2.columns]
f_zeroes = (df2[data_cols] == 0).any(axis=1).to_numpy()
if f_zeroes.any():
df2_zeroes = df2[f_zeroes]
df2 = df2[~f_zeroes]
else:
df2_zeroes = None
if df2.shape[0] <= 1:
logger.info("price-repair-100x: Insufficient good data for detecting 100x price errors")
if "Repaired?" not in df.columns:
df["Repaired?"] = False
return df
df2_data = df2[data_cols].to_numpy()
median = _ndimage.median_filter(df2_data, size=(3, 3), mode="wrap")
ratio = df2_data / median
ratio_rounded = (ratio / 20).round() * 20 # round ratio to nearest 20
f = ratio_rounded == 100
ratio_rcp = 1.0/ratio
ratio_rcp_rounded = (ratio_rcp / 20).round() * 20 # round ratio to nearest 20
f_rcp = (ratio_rounded == 100) | (ratio_rcp_rounded == 100)
f_either = f | f_rcp
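        # Worked example: with a local median ~1.20, a bad value of 120.0 gives
        # ratio ~100 (caught by f); a bad value of 0.012 gives ratio ~0.01, so
        # ratio_rcp ~100 (caught by f_rcp)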
if not f_either.any():
logger.info("price-repair-100x: No sporadic 100x errors")
if "Repaired?" not in df.columns:
df["Repaired?"] = False
return df
# Mark values to send for repair
tag = -1.0
for i in range(len(data_cols)):
fi = f_either[:, i]
c = data_cols[i]
df2.loc[fi, c] = tag
n_before = (df2_data == tag).sum()
df2 = self._reconstruct_intervals_batch(df2, interval, prepost, tag)
        df2_tagged = df2[data_cols].to_numpy() == tag
        n_after = df2_tagged.sum()
if n_after > 0:
# This second pass will *crudely* "fix" any remaining errors in High/Low
# simply by ensuring they don't contradict e.g. Low = 100x High.
f = (df2[data_cols].to_numpy() == tag) & f
for i in range(f.shape[0]):
fi = f[i, :]
if not fi.any():
continue
idx = df2.index[i]
for c in ['Open', 'Close']:
j = data_cols.index(c)
if fi[j]:
df2.loc[idx, c] = df.loc[idx, c] * 0.01
c = "High" ; j = data_cols.index(c)
if fi[j]:
df2.loc[idx, c] = df2.loc[idx, ["Open", "Close"]].max()
c = "Low" ; j = data_cols.index(c)
if fi[j]:
df2.loc[idx, c] = df2.loc[idx, ["Open", "Close"]].min()
f_rcp = (df2[data_cols].to_numpy() == tag) & f_rcp
for i in range(f_rcp.shape[0]):
fi = f_rcp[i, :]
if not fi.any():
continue
idx = df2.index[i]
for c in ['Open', 'Close']:
j = data_cols.index(c)
if fi[j]:
df2.loc[idx, c] = df.loc[idx, c] * 100.0
c = "High" ; j = data_cols.index(c)
if fi[j]:
df2.loc[idx, c] = df2.loc[idx, ["Open", "Close"]].max()
c = "Low" ; j = data_cols.index(c)
if fi[j]:
df2.loc[idx, c] = df2.loc[idx, ["Open", "Close"]].min()
df2_tagged = df2[data_cols].to_numpy() == tag
n_after_crude = df2_tagged.sum()
else:
n_after_crude = n_after
n_fixed = n_before - n_after_crude
n_fixed_crudely = n_after - n_after_crude
if n_fixed > 0:
report_msg = f"{self.ticker}: fixed {n_fixed}/{n_before} currency unit mixups "
if n_fixed_crudely > 0:
report_msg += f"({n_fixed_crudely} crudely) "
report_msg += f"in {interval} price data"
logger.info('price-repair-100x: ' + report_msg)
# Restore original values where repair failed
f_either = df2[data_cols].to_numpy() == tag
for j in range(len(data_cols)):
fj = f_either[:, j]
if fj.any():
c = data_cols[j]
df2.loc[fj, c] = df.loc[fj, c]
if df2_zeroes is not None:
if "Repaired?" not in df2_zeroes.columns:
df2_zeroes["Repaired?"] = False
df2 = pd.concat([df2, df2_zeroes]).sort_index()
            df2.index = pd.to_datetime(df2.index)
return df2
@utils.log_indent_decorator
def _fix_unit_switch(self, df, interval, tz_exchange):
        # Sometimes Yahoo returns a few prices in cents/pence instead of $/£
# I.e. 100x bigger
# 2 ways this manifests:
# - random 100x errors spread throughout table
# - a sudden switch between $<->cents at some date
# This function fixes the second.
# Eventually Yahoo fixes but could take them 2 weeks.
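        # 100.0 = ratio between major & minor currency units ($ vs cents, £ vs pence)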
return self._fix_prices_sudden_change(df, interval, tz_exchange, 100.0)
@utils.log_indent_decorator
def _fix_zeroes(self, df, interval, tz_exchange, prepost):