Skip to content

Commit

Permalink
Removed .ix
Browse files Browse the repository at this point in the history
  • Loading branch information
saeedamen committed Aug 24, 2020
1 parent 312bf9b commit 8515260
Show file tree
Hide file tree
Showing 7 changed files with 36 additions and 25 deletions.
3 changes: 3 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -105,10 +105,13 @@ In findatapy/examples you will find several demos

# Release Notes

* 0.1.13 - findatapy (24 Aug 2020)
* 0.1.12 - findatapy (06 May 2020)

# Coding log

* 24 Aug 2020
* Removed .ix references (to work with newer Pandas)
* 06 May 2020
* Amended function to remove points outside FX hours to exclude 1 Jan every year
* RetStats can now resample time series (removed kurtosis)
Expand Down
26 changes: 13 additions & 13 deletions findatapy/market/market.py
Original file line number Diff line number Diff line change
Expand Up @@ -598,23 +598,23 @@ def extract_vol_surface_for_date(self, df, cross, date_index):
df_surf = pandas.DataFrame(index=strikes, columns=tenor)

for ten in tenor:
df_surf.ix["10DP", ten] = df.ix[date_index, cross + "V" + ten + ".close"] \
- (df.ix[date_index, cross + "10R" + ten + ".close"] / 2.0) \
+ (df.ix[date_index, cross + "10B" + ten + ".close"])
df_surf[ten]["10DP"] = df[cross + "V" + ten + ".close"][date_index] \
- (df[cross + "10R" + ten + ".close"][date_index] / 2.0) \
+ (df[cross + "10B" + ten + ".close"][date_index])

df_surf.ix["10DC", ten] = df.ix[date_index, cross + "V" + ten + ".close"] \
+ (df.ix[date_index, cross + "10R" + ten + ".close"] / 2.0) \
+ (df.ix[date_index, cross + "10B" + ten + ".close"])
df_surf[ten]["10DC"] = df[cross + "V" + ten + ".close"][date_index] \
+ (df[cross + "10R" + ten + ".close"][date_index] / 2.0) \
+ (df[cross + "10B" + ten + ".close"][date_index])

df_surf.ix["25DP", ten] = df.ix[date_index, cross + "V" + ten + ".close"] \
- (df.ix[date_index, cross + "25R" + ten + ".close"] / 2.0) \
+ (df.ix[date_index, cross + "25B" + ten + ".close"])
df_surf[ten]["25DP"] = df[cross + "V" + ten + ".close"][date_index] \
- (df[cross + "25R" + ten + ".close"][date_index] / 2.0) \
+ (df[cross + "25B" + ten + ".close"][date_index])

df_surf.ix["25DC", ten] = df.ix[date_index, cross + "V" + ten + ".close"] \
+ (df.ix[date_index, cross + "25R" + ten + ".close"] / 2.0) \
+ (df.ix[date_index, cross + "25B" + ten + ".close"])
df_surf[ten]["25DC"] = df[cross + "V" + ten + ".close"][date_index] \
+ (df[cross + "25R" + ten + ".close"][date_index] / 2.0) \
+ (df[cross + "25B" + ten + ".close"][date_index])

df_surf.ix["ATM", ten] = df.ix[date_index, cross + "V" + ten + ".close"]
df_surf[ten]["ATM"] = df[cross + "V" + ten + ".close"][date_index]

return df_surf

Expand Down
14 changes: 11 additions & 3 deletions findatapy/timeseries/calculations.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,10 +152,10 @@ def calculate_individual_trade_gains(self, signal_data_frame, strategy_returns_d
# TODO experiment with quicker ways of writing below?
# for val in col.index:
# trade_returns.set_value(val, col_name, col[val])
# trade_returns.ix[val, col_name] = col[val]
# trade_returns[col_name][val] = col[val]

date_indices = trade_returns.index.searchsorted(col.index)
trade_returns.ix[date_indices, col_name] = col
trade_returns[col_name][date_indices] = col

return trade_returns

Expand Down Expand Up @@ -1175,6 +1175,14 @@ def average_by_hour_min_of_day_pretty_output(self, data_frame):

return data_frame

def average_by_hour_min_sec_of_day_pretty_output(self, data_frame):
    """Average a time series by its (hour, minute, second) of day.

    Buckets every observation by its wall-clock time of day at second
    resolution and takes the mean of each bucket, so the result has one
    row per distinct hour/minute/second seen across the whole sample.

    Parameters
    ----------
    data_frame : pandas.DataFrame
        Time series indexed by a DatetimeIndex.

    Returns
    -------
    pandas.DataFrame
        Mean values, indexed by ``datetime.time`` objects.
    """
    # BUGFIX: the original passed index.minute twice (copy-paste slip),
    # so the 'second' key duplicated the minute values and the grouping
    # never actually resolved to second granularity. The third key must
    # be index.second.
    data_frame = data_frame.groupby(
        [data_frame.index.hour.rename('hour'),
         data_frame.index.minute.rename('minute'),
         data_frame.index.second.rename('second')]).mean()

    # Collapse the (hour, minute, second) MultiIndex into datetime.time
    # values for pretty output, matching the sibling *_pretty_output methods.
    data_frame.index = data_frame.index.map(lambda t: datetime.time(*t))

    return data_frame

def all_by_hour_min_of_day_pretty_output(self, data_frame):

df_new = []
Expand All @@ -1193,7 +1201,7 @@ def average_by_year_hour_min_of_day_pretty_output(self, data_frame):
# time_of_day = []
#
# for year in years:
# temp = data_frame.ix[data_frame.index.year == year]
# temp = data_frame[data_frame.index.year == year]
# time_of_day.append(temp.groupby(temp.index.time).mean())
#
# data_frame = pandas.concat(time_of_day, axis=1, keys = years)
Expand Down
4 changes: 2 additions & 2 deletions findatapy/timeseries/dataquality.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,8 +122,8 @@ def percentage_nan_between_start_finish_dates(self, df, df_properties, asset_fie
c_new = [x.split(".")[0] for x in df.columns]

index = df_dates.index.searchsorted(c_new)
start_date = df_dates.ix[index, start_date_field]
finish_date = df_dates.ix[index, finish_date_field]
start_date = df_dates[start_date_field][index]
finish_date = df_dates[finish_date_field][index]

for i in range(0, len(df.columns)):
df_sub = df[df.columns[i]]
Expand Down
10 changes: 5 additions & 5 deletions findatapy/timeseries/filter.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ def filter_time_series_by_holidays(self, data_frame, cal = 'FX', holidays_list =

# optimal case for weekdays: remove Saturday and Sunday
if (cal == 'WEEKDAY'):
return data_frame.ix[data_frame.index.dayofweek <= 4]
return data_frame[data_frame.index.dayofweek <= 4]

# select only those holidays in the sample
holidays_start = self.get_holidays(data_frame.index[0], data_frame.index[-1], cal, holidays_list = holidays_list)
Expand Down Expand Up @@ -179,7 +179,7 @@ def filter_time_series_by_holidays(self, data_frame, cal = 'FX', holidays_list =
# indices = list(range(counter, len(floored_dates)))
# indices_to_keep = indices_to_keep + indices
#
# data_frame_filtered = data_frame.ix[indices_to_keep]
# data_frame_filtered = data_frame[indices_to_keep]

if data_frame.index.tz is None:
holidays_start = holidays_start.tz_localize(None)
Expand All @@ -191,8 +191,8 @@ def filter_time_series_by_holidays(self, data_frame, cal = 'FX', holidays_list =
for i in range(0, len(holidays_start)):


data_frame_temp = data_frame_left.ix[data_frame_left.index < holidays_start[i]]
data_frame_left = data_frame_left.ix[data_frame_left.index >= holidays_end[i]]
data_frame_temp = data_frame_left[data_frame_left.index < holidays_start[i]]
data_frame_left = data_frame_left[data_frame_left.index >= holidays_end[i]]

data_frame_filtered.append(data_frame_temp)

Expand Down Expand Up @@ -385,7 +385,7 @@ def filter_time_series_aux(self, start_date, finish_date, data_frame, offset):
# if (0 <= start_index + offset < len(data_frame.index)):
# start_index = start_index + offset
#
# # data_frame = data_frame.ix[start_date < data_frame.index]
# # data_frame = data_frame[start_date < data_frame.index]
#
# if(finish_date is not None):
# finish_index = data_frame.index.searchsorted(finish_date)
Expand Down
2 changes: 1 addition & 1 deletion findatapy_examples/fxspotdata_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
# run_example = 5 - download second FX data from Bloomberg
# run_example = 6 - download free tick data from FXCM example (compare with DukasCopy)

run_example = 6
run_example = 1

if run_example == 1 or run_example == 0:

Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
tickers, using configuration files. There is also functionality which is particularly useful for those downloading FX market data."""

setup(name='findatapy',
version='0.1.12',
version='0.1.13',
description='Market data library',
author='Saeed Amen',
author_email='saeed@cuemacro.com',
Expand Down

0 comments on commit 8515260

Please sign in to comment.