diff --git a/.travis.yml b/.travis.yml index d330e85..178fd3a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,18 +9,18 @@ matrix: env: PANDAS_VERSION=0.17.1 NUMPY_VERSION=1.10.4 SCIPY_VERSION=0.16.1 LIBGFORTRAN_VERSION=1.0 - python: 2.7 env: PANDAS_VERSION=0.18.1 NUMPY_VERSION=1.11.1 SCIPY_VERSION=0.17.1 LIBGFORTRAN_VERSION=3.0 - - - python: 3.4 - env: PANDAS_VERSION=0.16.1 NUMPY_VERSION=1.9.2 SCIPY_VERSION=0.15.1 LIBGFORTRAN_VERSION=1.0 + - python: 2.7 + env: PANDAS_VERSION=0.19.2 NUMPY_VERSION=1.12.1 SCIPY_VERSION=0.19.0 LIBGFORTRAN_VERSION=3.0 + - python: 2.7 + env: PANDAS_VERSION=0.20.1 NUMPY_VERSION=1.12.1 SCIPY_VERSION=0.19.0 LIBGFORTRAN_VERSION=3.0 - python: 3.4 env: PANDAS_VERSION=0.17.1 NUMPY_VERSION=1.10.4 SCIPY_VERSION=0.16.1 LIBGFORTRAN_VERSION=1.0 - python: 3.4 env: PANDAS_VERSION=0.18.1 NUMPY_VERSION=1.11.1 SCIPY_VERSION=0.17.1 LIBGFORTRAN_VERSION=3.0 - - - python: 3.5 - env: PANDAS_VERSION=0.17.1 NUMPY_VERSION=1.10.4 SCIPY_VERSION=0.16.1 LIBGFORTRAN_VERSION=1.0 - python: 3.5 - env: PANDAS_VERSION=0.18.1 NUMPY_VERSION=1.11.1 SCIPY_VERSION=0.17.1 LIBGFORTRAN_VERSION=3.0 + env: PANDAS_VERSION=0.19.2 NUMPY_VERSION=1.12.1 SCIPY_VERSION=0.19.0 LIBGFORTRAN_VERSION=3.0 + - python: 3.6 + env: PANDAS_VERSION=0.20.1 NUMPY_VERSION=1.12.1 SCIPY_VERSION=0.19.0 LIBGFORTRAN_VERSION=3.0 before_install: # We do this conditionally because it saves us some downloading if the diff --git a/README.md b/README.md index 99347d2..ef784b1 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,77 @@ # empyrical + Common financial risk metrics. 
-## Installation +## Table of Contents -### Installing with `pip` +- [Installation](#installation) +- [Usage](#usage) +- [Support](#support) +- [Contributing](#contributing) +- [Testing](#testing) +## Installation ``` pip install empyrical ``` +## Usage + +Simple Statistics +```python +import numpy as np +from empyrical import max_drawdown, alpha_beta + +returns = np.array([.01, .02, .03, -.4, -.06, -.02]) +benchmark_returns = np.array([.02, .02, .03, -.35, -.05, -.01]) + +# calculate the max drawdown +max_drawdown(returns) + +# calculate alpha and beta +alpha, beta = alpha_beta(returns, benchmark_returns) + +``` + +Rolling Measures +```python +import numpy as np +from empyrical import roll_max_drawdown + +returns = np.array([.01, .02, .03, -.4, -.06, -.02]) + +# calculate the rolling max drawdown +roll_max_drawdown(returns, window=3) + +``` + +Pandas Support +```python +import pandas as pd +from empyrical import roll_up_capture, capture + +returns = pd.Series([.01, .02, .03, -.4, -.06, -.02]) + +# calculate a capture ratio +capture(returns) + +# calculate capture for up markets on a rolling 60 day basis +roll_up_capture(returns, window=60) +``` + +## Support + +Please [open an issue](https://github.com/quantopian/empyrical/issues/new) for support. + +## Contributing + +Please contribute using [Github Flow](https://guides.github.com/introduction/flow/). Create a branch, add commits, and [open a pull request](https://github.com/quantopian/empyrical/compare/). 
+ +## Testing +- install requirements + - "nose>=1.3.7", + - "parameterized>=0.6.1" + +``` +python -m unittest +``` diff --git a/empyrical/__init__.py b/empyrical/__init__.py index bb58fab..8f95e04 100644 --- a/empyrical/__init__.py +++ b/empyrical/__init__.py @@ -30,7 +30,7 @@ sharpe_ratio, sortino_ratio, downside_risk, - information_ratio, + excess_sharpe, alpha_beta, alpha, beta, @@ -40,6 +40,21 @@ stability_of_timeseries, tail_ratio, cagr, + capture, + up_capture, + down_capture, + up_down_capture, + up_alpha_beta, + down_alpha_beta, + roll_max_drawdown, + roll_up_capture, + roll_down_capture, + roll_up_down_capture, + roll_alpha_beta, + roll_sharpe_ratio, +) + +from .periods import ( DAILY, WEEKLY, MONTHLY, diff --git a/empyrical/periods.py b/empyrical/periods.py new file mode 100644 index 0000000..53869ac --- /dev/null +++ b/empyrical/periods.py @@ -0,0 +1,17 @@ +APPROX_BDAYS_PER_MONTH = 21 +APPROX_BDAYS_PER_YEAR = 252 + +MONTHS_PER_YEAR = 12 +WEEKS_PER_YEAR = 52 + +DAILY = 'daily' +WEEKLY = 'weekly' +MONTHLY = 'monthly' +YEARLY = 'yearly' + +ANNUALIZATION_FACTORS = { + DAILY: APPROX_BDAYS_PER_YEAR, + WEEKLY: WEEKS_PER_YEAR, + MONTHLY: MONTHS_PER_YEAR, + YEARLY: 1 +} diff --git a/empyrical/stats.py b/empyrical/stats.py index 3a7615b..38ab5ef 100644 --- a/empyrical/stats.py +++ b/empyrical/stats.py @@ -20,26 +20,9 @@ from scipy import stats from six import iteritems -from .utils import nanmean, nanstd, nanmin - - -APPROX_BDAYS_PER_MONTH = 21 -APPROX_BDAYS_PER_YEAR = 252 - -MONTHS_PER_YEAR = 12 -WEEKS_PER_YEAR = 52 - -DAILY = 'daily' -WEEKLY = 'weekly' -MONTHLY = 'monthly' -YEARLY = 'yearly' - -ANNUALIZATION_FACTORS = { - DAILY: APPROX_BDAYS_PER_YEAR, - WEEKLY: WEEKS_PER_YEAR, - MONTHLY: MONTHS_PER_YEAR, - YEARLY: 1 -} +from .utils import nanmean, nanstd, nanmin, up, down, roll +from .periods import ANNUALIZATION_FACTORS, APPROX_BDAYS_PER_YEAR +from .periods import DAILY, WEEKLY, MONTHLY, YEARLY def _adjust_returns(returns, adjustment_factor): @@ -240,8 +223,12 
@@ def max_drawdown(returns): if len(returns) < 1: return np.nan - cumulative = cum_returns(returns, starting_value=100) + if type(returns) == pd.Series: + returns = returns.values + + cumulative = np.insert(cum_returns(returns, starting_value=100), 0, 100) max_return = np.fmax.accumulate(cumulative) + return nanmin((cumulative - max_return) / max_return) @@ -587,9 +574,9 @@ def downside_risk(returns, required_return=0, period=DAILY, return dside_risk -def information_ratio(returns, factor_returns): +def excess_sharpe(returns, factor_returns): """ - Determines the Information ratio of a strategy. + Determines the Excess Sharpe of a strategy. Parameters ---------- @@ -602,11 +589,12 @@ def information_ratio(returns, factor_returns): Returns ------- float - The information ratio. + The excess sharpe. Note ----- - See https://en.wikipedia.org/wiki/information_ratio for more details. + The excess Sharpe is a simplified Information Ratio that uses + tracking error rather than "active risk" as the denominator. """ if len(returns) < 2: @@ -724,7 +712,6 @@ def alpha_beta_aligned(returns, factor_returns, risk_free=0.0, period=DAILY, Alpha. float Beta. - """ b = beta_aligned(returns, factor_returns, risk_free) a = alpha_aligned(returns, factor_returns, risk_free, period, @@ -1005,6 +992,227 @@ def cagr(returns, period=DAILY, annualization=None): return ending_value ** (1. / no_years) - 1 +def capture(returns, factor_returns, period=DAILY): + """ + Compute capture ratio. + + Parameters + ---------- + returns : pd.Series or np.ndarray + Returns of the strategy, noncumulative. + - See full explanation in :func:`~empyrical.stats.cum_returns`. + factor_returns : pd.Series or np.ndarray + Noncumulative returns of the factor to which beta is + computed. Usually a benchmark such as the market. + - This is in the same style as returns. + period : str, optional + Defines the periodicity of the 'returns' data for purposes of + annualizing. 
Value ignored if `annualization` parameter is specified. + Defaults are: + 'monthly':12 + 'weekly': 52 + 'daily': 252 + Returns + ------- + float, np.nan + The capture ratio. + + Notes + ----- + See http://www.investopedia.com/terms/u/up-market-capture-ratio.asp for + details. + """ + return (annual_return(returns, period=period) / + annual_return(factor_returns, period=period)) + + +def up_capture(returns, factor_returns, **kwargs): + """ + Compute the capture ratio for periods when the benchmark return is positive + + Parameters + ---------- + see documentation for `capture`. + + Returns + ------- + float, np.nan + + Notes + ----- + See http://www.investopedia.com/terms/u/up-market-capture-ratio.asp for + more information. + """ + return up(returns, factor_returns, function=capture, **kwargs) + + +def down_capture(returns, factor_returns, **kwargs): + """ + Compute the capture ratio for periods when the benchmark return is negative + + Parameters + ---------- + see documentation for `capture`. + + Returns + ------- + float, np.nan + + Note + ---- + See http://www.investopedia.com/terms/d/down-market-capture-ratio.asp for + more information. + """ + return down(returns, factor_returns, function=capture, **kwargs) + + +def up_down_capture(returns, factor_returns, **kwargs): + """ + Computes the ratio of up_capture to down_capture. + + Parameters + ---------- + see documentation for `capture`. + + Returns + ------- + float + the updown capture ratio + """ + return (up_capture(returns, factor_returns, **kwargs) / + down_capture(returns, factor_returns, **kwargs)) + + +def up_alpha_beta(returns, factor_returns, **kwargs): + """ + Computes alpha and beta for periods when the benchmark return is positive. + + Parameters + ---------- + see documentation for `alpha_beta`. + + Returns + ------- + float + Alpha. + float + Beta. 
+ """ + return up(returns, factor_returns, function=alpha_beta_aligned, **kwargs) + + +def down_alpha_beta(returns, factor_returns, **kwargs): + """ + Computes alpha and beta for periods when the benchmark return is negative. + + Parameters + ---------- + see documentation for `alpha_beta`. + + Returns + ------- + float + Alpha. + float + Beta. + """ + return down(returns, factor_returns, function=alpha_beta_aligned, **kwargs) + + +def roll_up_capture(returns, factor_returns, window=10, **kwargs): + """ + Computes the up capture measure over a rolling window. + + Parameters + ---------- + see documentation for `capture` (pass all args, kwargs required) + + window : int, required + Size of the rolling window in terms of the periodicity of the data. + - eg window = 60, periodicity=DAILY, represents a rolling 60 day window + """ + return roll(returns, factor_returns, window=window, function=up_capture, + **kwargs) + + +def roll_down_capture(returns, factor_returns, window=10, **kwargs): + """ + Computes the down capture measure over a rolling window. + + Parameters + ---------- + see documentation for `capture` (pass all args, kwargs required) + + window : int, required + Size of the rolling window in terms of the periodicity of the data. + - eg window = 60, periodicity=DAILY, represents a rolling 60 day window + """ + return roll(returns, factor_returns, window=window, function=down_capture, + **kwargs) + + +def roll_up_down_capture(returns, factor_returns, window=10, **kwargs): + """ + Computes the up/down capture measure over a rolling window. + + Parameters + ---------- + see documentation for `capture` (pass all args, kwargs required) + + window : int, required + Size of the rolling window in terms of the periodicity of the data. 
+ - eg window = 60, periodicity=DAILY, represents a rolling 60 day window + """ + return roll(returns, factor_returns, window=window, + function=up_down_capture, **kwargs) + + +def roll_max_drawdown(returns, window=10, **kwargs): + """ + Computes the max_drawdown measure over a rolling window. + + Parameters + ---------- + see documentation for `max_drawdown` (pass all args, kwargs required) + + window : int, required + Size of the rolling window in terms of the periodicity of the data. + - eg window = 60, periodicity=DAILY, represents a rolling 60 day window + """ + return roll(returns, window=window, function=max_drawdown, **kwargs) + + +def roll_alpha_beta(returns, factor_returns, window=10, **kwargs): + """ + Computes the alpha_beta measure over a rolling window. + + Parameters + ---------- + see documentation for `alpha_beta` (pass all args, kwargs required) + + window : int, required + Size of the rolling window in terms of the periodicity of the data. + - eg window = 60, periodicity=DAILY, represents a rolling 60 day window + """ + return roll(returns, factor_returns, window=window, + function=alpha_beta_aligned, **kwargs) + + +def roll_sharpe_ratio(returns, window=10, **kwargs): + """ + Computes the sharpe ratio measure over a rolling window. + + Parameters + ---------- + see documentation for `sharpe_ratio` (pass all args, kwargs required) + + window : int, required + Size of the rolling window in terms of the periodicity of the data. 
+ - eg window = 60, periodicity=DAILY, represents a rolling 60 day window + """ + return roll(returns, window=window, function=sharpe_ratio, **kwargs) + + SIMPLE_STAT_FUNCS = [ cum_returns_final, annual_return, @@ -1022,7 +1230,10 @@ def cagr(returns, period=DAILY, annualization=None): ] FACTOR_STAT_FUNCS = [ - information_ratio, + excess_sharpe, alpha, beta, + capture, + up_capture, + down_capture ] diff --git a/empyrical/tests/test_stats.py b/empyrical/tests/test_stats.py index c4c05b5..b423935 100644 --- a/empyrical/tests/test_stats.py +++ b/empyrical/tests/test_stats.py @@ -5,7 +5,7 @@ from operator import attrgetter from unittest import TestCase, skip, SkipTest -from nose_parameterized import parameterized +from parameterized import parameterized import numpy as np from numpy.testing import assert_almost_equal, assert_allclose import pandas as pd @@ -14,6 +14,7 @@ from six import iteritems, wraps import empyrical +import empyrical.utils as emutils DECIMAL_PLACES = 8 @@ -35,6 +36,11 @@ class TestStats(TestCase): np.array([0., -6., -7., -1., -9., -2., -6., -8., -5.]) / 100, index=pd.date_range('2000-1-30', periods=9, freq='D')) + # All negative returns + all_negative_returns = pd.Series( + np.array([-2., -6., -7., -1., -9., -2., -6., -8., -5.]) / 100, + index=pd.date_range('2000-1-30', periods=9, freq='D')) + # Positive and negative returns with max drawdown mixed_returns = pd.Series( np.array([np.nan, 1., 10., -4., 2., 3., 2., 1., -10.]) / 100, @@ -197,6 +203,7 @@ def test_aggregate_returns(self, returns, convert_to, expected): (mixed_returns, -0.1), (positive_returns, -0.0), (negative_returns, -0.36590730349873601), + (all_negative_returns, -0.3785891574287616), (pd.Series( np.array([10, -10, 10]) / 100, index=pd.date_range('2000-1-30', periods=3, freq='D')), @@ -641,9 +648,9 @@ def test_sortino_translation_diff(self, returns, required_return, (mixed_returns, 0.0, 0.10859306069076737), (mixed_returns, flat_line_1, -0.06515583641446039), ]) - def 
test_information_ratio(self, returns, factor_returns, expected): + def test_excess_sharpe(self, returns, factor_returns, expected): assert_almost_equal( - self.empyrical.information_ratio(returns, factor_returns), + self.empyrical.excess_sharpe(returns, factor_returns), expected, DECIMAL_PLACES) @@ -654,13 +661,13 @@ def test_information_ratio(self, returns, factor_returns, expected): (flat_line_1_tz, pos_line), (noise, pos_line) ]) - def test_information_ratio_noisy(self, noise_line, benchmark): + def test_excess_sharpe_noisy(self, noise_line, benchmark): noisy_returns_1 = noise_line[0:250].add(benchmark[250:], fill_value=0) noisy_returns_2 = noise_line[0:500].add(benchmark[500:], fill_value=0) noisy_returns_3 = noise_line[0:750].add(benchmark[750:], fill_value=0) - ir_1 = self.empyrical.information_ratio(noisy_returns_1, benchmark) - ir_2 = self.empyrical.information_ratio(noisy_returns_2, benchmark) - ir_3 = self.empyrical.information_ratio(noisy_returns_3, benchmark) + ir_1 = self.empyrical.excess_sharpe(noisy_returns_1, benchmark) + ir_2 = self.empyrical.excess_sharpe(noisy_returns_2, benchmark) + ir_3 = self.empyrical.excess_sharpe(noisy_returns_3, benchmark) assert abs(ir_1) < abs(ir_2) assert abs(ir_2) < abs(ir_3) @@ -672,16 +679,16 @@ def test_information_ratio_noisy(self, noise_line, benchmark): (neg_line, noise, flat_line_1_tz), (neg_line, inv_noise, flat_line_1_tz) ]) - def test_information_ratio_trans(self, returns, add_noise, translation): - ir = self.empyrical.information_ratio( + def test_excess_sharpe_trans(self, returns, add_noise, translation): + ir = self.empyrical.excess_sharpe( returns+add_noise, returns ) - raised_ir = self.empyrical.information_ratio( + raised_ir = self.empyrical.excess_sharpe( returns+add_noise+translation, returns ) - depressed_ir = self.empyrical.information_ratio( + depressed_ir = self.empyrical.excess_sharpe( returns+add_noise-translation, returns ) @@ -932,10 +939,10 @@ def test_tail_ratio(self, returns, expected): # 
Regression tests for CAGR. @parameterized.expand([ - (empty_returns, "daily", np.nan), - (one_return, "daily", 11.274002099240244), - (mixed_returns, "daily", 1.9135925373194231), - (flat_line_1_tz, "daily", 11.274002099240256), + (empty_returns, empyrical.DAILY, np.nan), + (one_return, empyrical.DAILY, 11.274002099240244), + (mixed_returns, empyrical.DAILY, 1.9135925373194231), + (flat_line_1_tz, empyrical.DAILY, 11.274002099240256), (pd.Series(np.array( [3., 3., 3.])/100, index=pd.date_range('2000-1-30', periods=3, freq='A') @@ -989,6 +996,207 @@ def test_cagr_noisy(self, returns, add_noise): noisy_cagr_2, 1) + @parameterized.expand([ + (empty_returns, 6, []), + (negative_returns, 6, [-0.2282, -0.2745, -0.2899]) + ]) + def test_roll_max_drawdown(self, returns, window, expected): + test = self.empyrical.roll_max_drawdown(returns, window=window) + assert_almost_equal( + np.asarray(test), + np.asarray(expected), + 4) + + @parameterized.expand([ + (empty_returns, 6, []), + (negative_returns, 6, [-18.09162052, -26.79897486, -26.69138263]), + (mixed_returns, 6, [7.57445259, 8.22784105, 8.22784105]) + ]) + def test_roll_sharpe_ratio(self, returns, window, expected): + test = self.empyrical.roll_sharpe_ratio(returns, window=window) + assert_almost_equal( + np.asarray(test), + np.asarray(expected), + DECIMAL_PLACES) + + @parameterized.expand([ + (empty_returns, empty_returns, np.nan), + (one_return, one_return, 1.), + (mixed_returns, mixed_returns, 1.), + (all_negative_returns, mixed_returns, -0.52257643222960259) + ]) + def test_capture_ratio(self, returns, factor_returns, expected): + assert_almost_equal( + self.empyrical.capture(returns, factor_returns), + expected, + DECIMAL_PLACES) + + @parameterized.expand([ + (empty_returns, empty_returns, np.nan), + (one_return, one_return, np.nan), + (mixed_returns, mixed_returns, 1.), + (all_negative_returns, mixed_returns, 0.99956025703798634), + (positive_returns, mixed_returns, -11.27400221) + ]) + def test_down_capture(self, 
returns, factor_returns, expected): + assert_almost_equal( + self.empyrical.down_capture(returns, factor_returns), + expected, + DECIMAL_PLACES) + + @parameterized.expand([ + (empty_returns, simple_benchmark, 1, []), + (one_return, one_return, 1, []), + (mixed_returns, negative_returns, + 6, [(-3.81286957, -0.7826087), (-4.03558719, -0.76156584), + (-2.66915888, -0.61682243)]), + (mixed_returns, mixed_returns, + 6, [(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)]), + (mixed_returns, -mixed_returns, + 6, [(0.0, -1.0), (0.0, -1.0), (0.0, -1.0)]), + ]) + def test_roll_alpha_beta(self, returns, benchmark, window, expected): + + test = self.empyrical.roll_alpha_beta(returns, benchmark, window) + alpha_test = [t[0] for t in test] + beta_test = [t[1] for t in test] + + alpha_expected = [t[0] for t in expected] + beta_expected = [t[1] for t in expected] + + assert_almost_equal( + alpha_test, + alpha_expected, + DECIMAL_PLACES) + + assert_almost_equal( + beta_test, + beta_expected, + DECIMAL_PLACES) + + @parameterized.expand([ + (empty_returns, empty_returns, 1, []), + (one_return, one_return, 1, np.nan), + (mixed_returns, mixed_returns, 6, [1., 1., 1.]), + (positive_returns, mixed_returns, + 6, [-0.00011389, -0.00025861, -0.00015211]), + (all_negative_returns, mixed_returns, + 6, [-6.38880246e-05, -1.65241701e-04, -1.65241719e-04]) + ]) + def test_roll_up_down_capture(self, returns, factor_returns, window, + expected): + test = self.empyrical.roll_up_down_capture(returns, factor_returns, + window=window) + assert_almost_equal( + np.asarray(test), + np.asarray(expected), + DECIMAL_PLACES) + + @parameterized.expand([ + (empty_returns, empty_returns, 1, []), + (one_return, one_return, 1, 1.), + (mixed_returns, mixed_returns, 6, [1., 1., 1.]), + (positive_returns, mixed_returns, + 6, [-11.2743862, -11.2743862, -11.2743862]), + (all_negative_returns, mixed_returns, + 6, [0.92058591, 0.92058591, 0.92058591]) + ]) + def test_roll_down_capture(self, returns, factor_returns, window, + 
expected): + test = self.empyrical.roll_down_capture(returns, factor_returns, + window=window) + assert_almost_equal( + np.asarray(test), + np.asarray(expected), + DECIMAL_PLACES) + + @parameterized.expand([ + (empty_returns, empty_returns, 1, []), + (one_return, one_return, 1, 1.), + (mixed_returns, mixed_returns, 6, [1., 1., 1.]), + (positive_returns, mixed_returns, + 6, [0.00128406, 0.00291564, 0.00171499]), + (all_negative_returns, mixed_returns, + 6, [-5.88144154e-05, -1.52119182e-04, -1.52119198e-04]) + ]) + def test_roll_up_capture(self, returns, factor_returns, window, expected): + test = self.empyrical.roll_up_capture(returns, factor_returns, + window=window) + assert_almost_equal( + np.asarray(test), + np.asarray(expected), + DECIMAL_PLACES) + + @parameterized.expand([ + (empty_returns, simple_benchmark, (np.nan, np.nan)), + (one_return, one_return, (np.nan, np.nan)), + (mixed_returns[1:], negative_returns[1:], + (-8.306666666666668, -0.71296296296296313)), + (mixed_returns, mixed_returns, (0.0, 1.0)), + (mixed_returns, -mixed_returns, (0.0, -1.0)) + ]) + def test_down_alpha_beta(self, returns, benchmark, expected): + down_alpha, down_beta = self.empyrical( + pandas_only=len(returns) != len(benchmark), + return_types=tuple, + ).down_alpha_beta(returns, benchmark) + assert_almost_equal( + down_alpha, + expected[0], + DECIMAL_PLACES) + assert_almost_equal( + down_beta, + expected[1], + DECIMAL_PLACES) + + @parameterized.expand([ + (empty_returns, simple_benchmark, (np.nan, np.nan)), + (one_return, one_return, (np.nan, np.nan)), + (mixed_returns[1:], positive_returns[1:], + (0.3599999999999995, 0.4285714285)), + (mixed_returns, mixed_returns, (0.0, 1.0)), + (mixed_returns, -mixed_returns, (0.0, -1.0)) + ]) + def test_up_alpha_beta(self, returns, benchmark, expected): + up_alpha, up_beta = self.empyrical( + pandas_only=len(returns) != len(benchmark), + return_types=tuple, + ).up_alpha_beta(returns, benchmark) + assert_almost_equal( + up_alpha, + expected[0], 
+ DECIMAL_PLACES) + assert_almost_equal( + up_beta, + expected[1], + DECIMAL_PLACES) + + @parameterized.expand([ + (empty_returns, empty_returns, np.nan), + (one_return, one_return, np.nan), + (mixed_returns, mixed_returns, 1.), + (positive_returns, mixed_returns, -0.0006756053495), + (all_negative_returns, mixed_returns, -0.0004338236) + ]) + def test_up_down_capture(self, returns, factor_returns, expected): + assert_almost_equal( + self.empyrical.up_down_capture(returns, factor_returns), + expected, + DECIMAL_PLACES) + + @parameterized.expand([ + (empty_returns, empty_returns, np.nan), + (one_return, one_return, 1.), + (mixed_returns, mixed_returns, 1.), + (positive_returns, mixed_returns, 0.0076167762), + (all_negative_returns, mixed_returns, -0.0004336328) + ]) + def test_up_capture(self, returns, factor_returns, expected): + assert_almost_equal( + self.empyrical.up_capture(returns, factor_returns), + expected, + DECIMAL_PLACES) + @property def empyrical(self): """ @@ -1048,6 +1256,73 @@ def empyrical(self): ) +class TestHelpers(TestCase): + """ + Tests for helper methods and utils. 
+ """ + + def setUp(self): + + self.ser_length = 120 + self.window = 12 + + self.returns = pd.Series( + np.random.randn(1, 120)[0]/100., + index=pd.date_range('2000-1-30', periods=120, freq='M')) + + self.factor_returns = pd.Series( + np.random.randn(1, 120)[0]/100., + index=pd.date_range('2000-1-30', periods=120, freq='M')) + + def test_roll_pandas(self): + res = emutils.roll(self.returns, + self.factor_returns, + window=12, + function=empyrical.alpha_aligned) + + self.assertTrue(res.size == self.ser_length - self.window) + + def test_roll_ndarray(self): + res = emutils.roll(self.returns.values, + self.factor_returns.values, + window=12, + function=empyrical.alpha_aligned) + + self.assertTrue(len(res == self.ser_length - self.window)) + + def test_down(self): + pd_res = emutils.down(self.returns, self.factor_returns, + function=empyrical.capture) + np_res = emutils.down(self.returns.values, self.factor_returns.values, + function=empyrical.capture) + + self.assertTrue(isinstance(pd_res, float)) + assert_almost_equal(pd_res, np_res, DECIMAL_PLACES) + + def test_up(self): + pd_res = emutils.up(self.returns, self.factor_returns, + function=empyrical.capture) + np_res = emutils.up(self.returns.values, self.factor_returns.values, + function=empyrical.capture) + + self.assertTrue(isinstance(pd_res, float)) + assert_almost_equal(pd_res, np_res, DECIMAL_PLACES) + + def test_roll_bad_types(self): + with self.assertRaises(ValueError): + emutils.roll(self.returns.values, + self.factor_returns, + window=12, + function=empyrical.max_drawdown) + + def test_roll_max_window(self): + res = emutils.roll(self.returns, + self.factor_returns, + window=self.ser_length + 100, + function=empyrical.max_drawdown) + self.assertTrue(res.size == 0) + + class Test2DStats(TestCase): """ Tests for functions that are capable of outputting a DataFrame. 
diff --git a/empyrical/utils.py b/empyrical/utils.py index 61c8381..4ca5800 100644 --- a/empyrical/utils.py +++ b/empyrical/utils.py @@ -12,6 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import pandas as pd +import numpy as np try: # fast versions @@ -25,7 +27,6 @@ nanargmin = bn.nanargmin except ImportError: # slower numpy - import numpy as np nanmean = np.nanmean nanstd = np.nanstd nansum = np.nansum @@ -33,3 +34,114 @@ nanmin = np.nanmin nanargmax = np.nanargmax nanargmin = np.nanargmin + + +def roll(*args, **kwargs): + """ + Calculates a given statistic across a rolling time period. + + Parameters + ---------- + returns : pd.Series or np.ndarray + Daily returns of the strategy, noncumulative. + - See full explanation in :func:`~empyrical.stats.cum_returns`. + factor_returns (optional): float / series + Benchmark return to compare returns against. + function: + the function to run for each rolling window. + window (keyword): int + the number of periods included in each calculation. + (other keywords): other keywords that are required to be passed to the + function in the 'function' argument may also be passed in. + + Returns + ------- + np.ndarray, pd.Series + depends on input type + ndarray(s) ==> ndarray + Series(s) ==> pd.Series + + A Series or ndarray of the results of the stat across the rolling + window. 
+ + """ + func = kwargs.pop('function') + window = kwargs.pop('window') + if len(args) > 2: + raise ValueError("Cannot pass more than 2 return sets") + + if len(args) == 2: + if not isinstance(args[0], type(args[1])): + raise ValueError("The two returns arguments are not the same.") + + if isinstance(args[0], np.ndarray): + return _roll_ndarray(func, window, *args, **kwargs) + return _roll_pandas(func, window, *args, **kwargs) + + +def up(returns, factor_returns, **kwargs): + """ + Calculates a given statistic filtering only positive factor return periods. + + Parameters + ---------- + returns : pd.Series or np.ndarray + Daily returns of the strategy, noncumulative. + - See full explanation in :func:`~empyrical.stats.cum_returns`. + factor_returns (optional): float / series + Benchmark return to compare returns against. + function: + the function to run for each rolling window. + (other keywords): other keywords that are required to be passed to the + function in the 'function' argument may also be passed in. + + Returns + ------- + Same as the return of the function + """ + func = kwargs.pop('function') + returns = returns[factor_returns > 0] + factor_returns = factor_returns[factor_returns > 0] + return func(returns, factor_returns, **kwargs) + + +def down(returns, factor_returns, **kwargs): + """ + Calculates a given statistic filtering only negative factor return periods. + + Parameters + ---------- + returns : pd.Series or np.ndarray + Daily returns of the strategy, noncumulative. + - See full explanation in :func:`~empyrical.stats.cum_returns`. + factor_returns (optional): float / series + Benchmark return to compare returns against. + function: + the function to run for each rolling window. + (other keywords): other keywords that are required to be passed to the + function in the 'function' argument may also be passed in. 
+ + Returns + ------- + Same as the return of the 'function' + """ + func = kwargs.pop('function') + returns = returns[factor_returns < 0] + factor_returns = factor_returns[factor_returns < 0] + return func(returns, factor_returns, **kwargs) + + +def _roll_ndarray(func, window, *args, **kwargs): + data = [] + for i in range(window, len(args[0])): + rets = [s[i-window:i] for s in args] + data.append(func(*rets, **kwargs)) + return np.array(data) + + +def _roll_pandas(func, window, *args, **kwargs): + data = {} + for i in range(window, len(args[0])): + rets = [s.iloc[i-window:i] for s in args] + data[args[0].index[i]] = func(*rets, **kwargs) + return pd.Series(data) diff --git a/setup.py b/setup.py index bb8826e..2a00901 100755 --- a/setup.py +++ b/setup.py @@ -53,7 +53,7 @@ test_reqs = [ "nose>=1.3.7", - "nose_parameterized>=0.5.0" + "parameterized>=0.6.1" ] @@ -68,7 +68,7 @@ extras_requirements = { "dev": [ "nose==1.3.7", - "nose-parameterized==0.5.0", + "parameterized==0.6.1", "flake8==2.5.1" ] }