Skip to content

Commit

Permalink
Merge pull request #2734 from quantopian/benchmark-cleanup
Browse files Browse the repository at this point in the history
Benchmark cleanup
  • Loading branch information
richafrank committed Jul 16, 2020
2 parents acd46c5 + bc9e754 commit ae9b8bc
Show file tree
Hide file tree
Showing 2 changed files with 41 additions and 24 deletions.
18 changes: 18 additions & 0 deletions tests/finance/test_risk.py
Expand Up @@ -317,6 +317,24 @@ def test_sharpe_value_when_null(self):
)
self.assertEqual(test_period['sharpe'], 0.0)

def test_sharpe_value_when_benchmark_null(self):
    """Sharpe ratio should be reported as 0.0 rather than NaN when the
    benchmark return series is all zeros.
    """
    # A full year (251 sessions) of flat, zero returns.
    zero_returns = factory.create_returns_from_list(
        [0.0] * 251,
        self.sim_params,
    )
    metrics = ClassicRiskMetrics.risk_metric_period(
        start_session=self.start_session,
        end_session=self.end_session,
        algorithm_returns=zero_returns,
        benchmark_returns=zero_returns,
        algorithm_leverages=pd.Series(0.0, index=self.algo_returns.index),
    )
    self.assertEqual(metrics['sharpe'], 0.0)

def test_representation(self):
test_period = ClassicRiskMetrics.risk_metric_period(
start_session=self.start_session,
Expand Down
47 changes: 23 additions & 24 deletions zipline/utils/run_algo.py
Expand Up @@ -401,14 +401,14 @@ class BenchmarkSpec(object):
Parameters
----------
benchmark_returns : pd.Series
benchmark_returns : pd.Series, optional
Series of returns to use as the benchmark.
benchmark_file : str or file
File containing a csv with `date` and `return` columns, to be read as
the benchmark.
benchmark_sid : int
benchmark_sid : int, optional
Sid of the asset to use as a benchmark.
benchmark_symbol : int
benchmark_symbol : str, optional
Symbol of the asset to use as a benchmark. Symbol will be looked up as
of the end date of the backtest.
no_benchmark : bool
Expand Down Expand Up @@ -501,28 +501,27 @@ def resolve(self, asset_finder, start_date, end_date):
raise _RunAlgoError(
"Symbol %s as a benchmark not found in this bundle."
)
elif self.no_benchmark:
benchmark_sid = None
benchmark_returns = self._zero_benchmark_returns(
start_date=start_date,
end_date=end_date,
)
else:
if not self.no_benchmark:
log.warn(
"No benchmark configured. "
"Assuming algorithm calls set_benchmark."
)
log.warn(
"Pass --benchmark-sid, --benchmark-symbol, or"
" --benchmark-file to set a source of benchmark returns."
)
log.warn(
"Pass --no-benchmark to use a dummy benchmark "
"of zero returns.",
)
benchmark_sid = None
benchmark_returns = None
else:
benchmark_sid = None
benchmark_returns = self._zero_benchmark_returns(
start_date=start_date,
end_date=end_date,
)
log.warn(
"No benchmark configured. "
"Assuming algorithm calls set_benchmark."
)
log.warn(
"Pass --benchmark-sid, --benchmark-symbol, or"
" --benchmark-file to set a source of benchmark returns."
)
log.warn(
"Pass --no-benchmark to use a dummy benchmark "
"of zero returns.",
)
benchmark_sid = None
benchmark_returns = None

return benchmark_sid, benchmark_returns

Expand Down

0 comments on commit ae9b8bc

Please sign in to comment.