Skip to content

Commit

Permalink
Merge 8ec3620 into 098c236
Browse files Browse the repository at this point in the history
  • Loading branch information
bashtage committed Jan 7, 2019
2 parents 098c236 + 8ec3620 commit 7cc4069
Show file tree
Hide file tree
Showing 33 changed files with 834 additions and 606 deletions.
12 changes: 8 additions & 4 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,10 @@ env:
- USE_NUMBA=true
- STATSMODELS_MASTER=false
- DOCBUILD=false
- PYTEST_PATTERN="(not slow)"
- secure: "dCvSzHmiqumrvQDQJXaPukd2TEituoUJTRJeBrLfEg0f1ZtAkoEcCudXuissODo1s/e+zClEhn3GRMGZc9QxKI0w8VHWeyMDHjFhyl4wubi35biUpXijYgs6IMqqdIo5oKcnMR6jG8uzuzPGDCpZh49+DgGU5E7rGHBgJ5Tdo8s="


matrix:
fast_finish: true
include:
Expand Down Expand Up @@ -51,11 +53,12 @@ matrix:
- PYTHON=3.6
- DOCBUILD=true
- MATPLOTLIB=3
- PYTEST_PATTERN="(slow or not slow)"

before_install:
- git fetch --tags
- sudo apt-get -y install pandoc
- wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda3.sh
- wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda3.sh -nv
- chmod +x miniconda3.sh
- ./miniconda3.sh -b
- export PATH=/home/travis/miniconda3/bin:$PATH
Expand All @@ -73,13 +76,13 @@ before_install:
- PKGS="${PKGS} patsy"; if [[ -n ${PATSY} ]]; then PKGS="${PKGS}=${PATSY}"; fi;
- PKGS="${PKGS} pandas"; if [[ -n ${PANDAS} ]]; then PKGS="${PKGS}=${PANDAS}"; fi;
- PKGS="${PKGS} Cython"; if [[ -n ${CYTHON} ]]; then PKGS="${PKGS}=${CYTHON}"; fi;
- if [[ -n ${MATPLOTLIB} ]]; then PKGS="${PKGS} matplotlib=${MATPLOTLIB}"; fi;
- if [[ -n ${MATPLOTLIB} ]]; then PKGS="${PKGS} matplotlib=${MATPLOTLIB} seaborn"; fi;
- PKGS="${PKGS} statsmodels"; if [[ -n ${STATSMODELS} ]]; then PKGS="${PKGS}=${STATSMODELS}"; fi;
- if [[ ${USE_NUMBA} = true ]]; then PKGS="${PKGS} numba"; if [[ -n ${NUMBA} ]]; then PKGS="${PKGS}=${NUMBA}"; fi; fi;
- echo conda create --yes --quiet -n arch-test ${PKGS}
- conda create --yes --quiet -n arch-test ${PKGS}
- source activate arch-test
- pip install flake8 nbconvert nbformat pytest coverage coveralls pytest-cov codecov pytest-xdist cached_property
- pip install cached_property flake8 "pytest<4.1" pytest-xdist pytest-cov coverage coveralls codecov nbformat nbconvert!=5.4 jupyter_client ipython jupyter -q
- if [[ "$STATSMODELS_MASTER" == true ]]; then sh ./ci/statsmodels-master.sh; fi;
- |
if [[ "$DOCBUILD" == true ]]; then
Expand All @@ -101,7 +104,8 @@ install:
script:
- set -e
- python ci/performance.py
- pytest -r a ${COVERAGE_OPTS} arch --durations=25
- echo pytest -r a -m "$PYTEST_PATTERN" ${COVERAGE_OPTS} --durations=25 arch
- pytest -r a -m "$PYTEST_PATTERN" ${COVERAGE_OPTS} --durations=25 arch
- |
if [[ "$DOCBUILD" = true ]]; then
python setup.py install
Expand Down
2 changes: 1 addition & 1 deletion appveyor.yml
Original file line number Diff line number Diff line change
Expand Up @@ -33,4 +33,4 @@ build_script:
- cmd: python setup.py develop

test_script:
- cmd: py.test -n 2 arch --durations=25
- cmd: py.test -n 2 arch -m "(not slow)" --durations=25
16 changes: 16 additions & 0 deletions arch/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import pytest


def pytest_addoption(parser):
    """Register the command-line flags that control slow-test selection."""
    options = (
        ("--skip-slow", "skip slow tests"),
        ("--only-slow", "run only slow tests"),
    )
    for flag, description in options:
        parser.addoption(flag, action="store_true", help=description)


def pytest_runtest_setup(item):
    """Skip tests according to the --skip-slow / --only-slow flags."""
    is_slow = 'slow' in item.keywords
    if is_slow and item.config.getoption("--skip-slow"):  # pragma: no cover
        pytest.skip("skipping due to --skip-slow")  # pragma: no cover
    if not is_slow and item.config.getoption("--only-slow"):  # pragma: no cover
        pytest.skip("skipping due to --only-slow")  # pragma: no cover
Empty file added arch/data/__init__.py
Empty file.
13 changes: 13 additions & 0 deletions arch/data/binary/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
from arch.data.utility import load_file


def load():
    """
    Load the graduate school admissions data used in the examples

    Returns
    -------
    data : DataFrame
        Dataset containing GRE, GPA and class rank, and admission decision
    """
    return load_file(__file__, 'binary.csv.gz')
Binary file added arch/data/binary/binary.csv.gz
Binary file not shown.
17 changes: 17 additions & 0 deletions arch/data/core_cpi/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from arch.data.utility import load_file


def load():
    """
    Load the Core CPI data used in the examples

    Returns
    -------
    data : DataFrame
        Data set containing the CPI less Food and Energy

    Notes
    -----
    From the FRED database
    """
    return load_file(__file__, 'core-cpi.csv.gz')
Binary file added arch/data/core_cpi/core-cpi.csv.gz
Binary file not shown.
13 changes: 13 additions & 0 deletions arch/data/default/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
from arch.data.utility import load_file


def load():
    """
    Load the AAA and BAA rates used in the examples

    Returns
    -------
    data : DataFrame
        Data set containing the rates on AAA and BAA rated bonds.
    """
    return load_file(__file__, 'default.csv.gz')
Binary file added arch/data/default/default.csv.gz
Binary file not shown.
19 changes: 19 additions & 0 deletions arch/data/frenchdata/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
from arch.data.utility import load_file


def load():
    """
    Load the Fama-French factor data used in the examples

    Returns
    -------
    data : DataFrame
        Data set containing excess market, size and value factors and the
        risk-free rate

    Notes
    -----
    Provided by Ken French,
    http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html
    """
    return load_file(__file__, 'frenchdata.csv.gz')
Binary file added arch/data/frenchdata/frenchdata.csv.gz
Binary file not shown.
13 changes: 13 additions & 0 deletions arch/data/nasdaq/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
from arch.data.utility import load_file


def load():
    """
    Load the NASDAQ Composite data used in the examples

    Returns
    -------
    data : DataFrame
        Data set containing OHLC, adjusted close and the trading volume.
    """
    return load_file(__file__, 'nasdaq.csv.gz')
Binary file added arch/data/nasdaq/nasdaq.csv.gz
Binary file not shown.
13 changes: 13 additions & 0 deletions arch/data/sp500/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
from arch.data.utility import load_file


def load():
    """
    Load the S&P 500 data used in the examples

    Returns
    -------
    data : DataFrame
        Data set containing OHLC, adjusted close and the trading volume.
    """
    return load_file(__file__, 'sp500.csv.gz')
Binary file added arch/data/sp500/sp500.csv.gz
Binary file not shown.
25 changes: 25 additions & 0 deletions arch/data/utility.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import os

import pandas as pd


def load_file(file_base, filename):
    """
    Load a csv.gz data file stored alongside a module.

    Parameters
    ----------
    file_base : str
        Path of a file (usually the ``__file__`` of the calling package);
        the data file is looked up in the same directory.
    filename : str
        Name of the csv.gz file to load

    Returns
    -------
    data : DataFrame
        Dataframe containing the loaded data.  If a ``Date`` column is
        present it is parsed to datetimes and used as the index; all
        remaining columns are coerced to numeric, with unparseable
        entries becoming NaN.
    """
    curr_dir = os.path.dirname(os.path.abspath(file_base))
    data = pd.read_csv(os.path.join(curr_dir, filename))
    if 'Date' in data:
        data.Date = pd.to_datetime(data.Date)
        data = data.set_index('Date')
    for col in data:
        # coerce instead of raising so partially dirty files still load
        data[col] = pd.to_numeric(data[col], errors='coerce')
    return data
13 changes: 13 additions & 0 deletions arch/data/vix/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
from arch.data.utility import load_file


def load():
    """
    Load the VIX Index data used in the examples

    Returns
    -------
    data : DataFrame
        Data set containing historical VIX
    """
    return load_file(__file__, 'vix.csv.gz')
Binary file added arch/data/vix/vix.csv.gz
Binary file not shown.
17 changes: 17 additions & 0 deletions arch/data/wti/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from arch.data.utility import load_file


def load():
    """
    Load the West Texas Intermediate crude oil price data used in the examples

    Returns
    -------
    data : DataFrame
        Data set containing the price of WTI

    Notes
    -----
    From the FRED database
    """
    return load_file(__file__, 'wti.csv.gz')
Binary file added arch/data/wti/wti.csv.gz
Binary file not shown.
19 changes: 19 additions & 0 deletions arch/tests/test_data.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
import pytest
import os
import glob
import pandas as pd

# Directory containing this test file
CURR_DIR = os.path.split(os.path.abspath(__file__))[0]
# Every entry in arch/data; dataset packages are the entries that are
# neither python files nor dunder artifacts (e.g. __pycache__)
FILES = glob.glob(os.path.join(CURR_DIR, '..', 'data', '*'))
DATASETS = [os.path.split(f)[-1] for f in FILES if ('.py' not in f and '__' not in f)]


@pytest.fixture(params=DATASETS)
def dataset(request):
    # Parametrized fixture yielding each bundled dataset name in turn
    return request.param


def test_dataset(dataset):
    """Smoke test: each bundled dataset imports and loads as a DataFrame."""
    import importlib

    # import_module returns the submodule directly, avoiding the awkward
    # __import__(..., fromlist=...) idiom
    mod = importlib.import_module('arch.data.{0}'.format(dataset))
    data = mod.load()
    assert isinstance(data, pd.DataFrame)
45 changes: 45 additions & 0 deletions arch/tests/test_examples.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
import glob
import os
import sys

import pytest

try:
    import jupyter_client
    import nbformat
    from nbconvert.preprocessors import ExecutePreprocessor
    # matplotlib is required for most notebooks
    import matplotlib  # noqa: F401

    kernels = jupyter_client.kernelspec.find_kernel_specs()
except ImportError:  # pragma: no cover
    pytestmark = pytest.mark.skip(reason='Required packages not available')

# Notebooks too slow to execute as part of the regular test run
SLOW_NOTEBOOKS = ['multiple-comparison_examples.ipynb']

kernel_name = 'python%s' % sys.version_info.major

head, _ = os.path.split(__file__)
NOTEBOOK_DIR = os.path.abspath(os.path.join(head, '..', '..', 'examples'))

nbs = sorted(glob.glob(os.path.join(NOTEBOOK_DIR, '*.ipynb')))
ids = list(map(lambda s: os.path.split(s)[-1].split('.')[0], nbs))
if not nbs:  # pragma: no cover
    # Must assign to ``pytestmark`` for the module-level skip to apply; a
    # bare ``pytest.mark.skip(...)`` expression creates a mark and
    # silently discards it.
    pytestmark = pytest.mark.skip(reason='No notebooks found and so no tests run')


@pytest.fixture(params=nbs, ids=ids)
def notebook(request):
    # Parametrized fixture yielding the path of each example notebook
    return request.param


@pytest.mark.slow
def test_notebook(notebook):
    """Execute an example notebook end-to-end; any cell error fails the test."""
    nb_name = os.path.split(notebook)[-1]
    if nb_name in SLOW_NOTEBOOKS:
        pytest.skip('Notebook is too slow to test')
    nb = nbformat.read(notebook, as_version=4)
    # allow_errors=False makes the preprocessor raise on the first failing cell
    ep = ExecutePreprocessor(allow_errors=False,
                             timeout=240,
                             kernel_name=kernel_name)
    # Run with the examples directory as cwd so relative data paths resolve
    ep.preprocess(nb, {'metadata': {'path': NOTEBOOK_DIR}})
9 changes: 9 additions & 0 deletions arch/univariate/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -1078,12 +1078,20 @@ def plot(self, annualize=None, scale=None):
>>> fig = res.plot(scale=360)
"""
from matplotlib.pyplot import figure

def _set_tight_x(axis, index):
try:
axis.set_xlim(index[0], index[-1])
except ValueError:
pass

fig = figure()

ax = fig.add_subplot(2, 1, 1)
ax.plot(self._index, self.resid / self.conditional_volatility)
ax.set_title('Standardized Residuals')
ax.axes.xaxis.set_ticklabels([])
_set_tight_x(ax, self._index)

ax = fig.add_subplot(2, 1, 2)
vol = self.conditional_volatility
Expand All @@ -1100,6 +1108,7 @@ def plot(self, annualize=None, scale=None):
title = 'Conditional Volatility'

ax.plot(self._index, vol)
_set_tight_x(ax, self._index)
ax.set_title(title)

return fig
Expand Down
1 change: 1 addition & 0 deletions ci/statsmodels-master.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,3 +6,4 @@ git clone --branch=master --depth=10 https://github.com/statsmodels/statsmodels.
cd statsmodels
python setup.py install
cd $GITDIR
pip install seaborn
104 changes: 46 additions & 58 deletions examples/bootstrap_examples.ipynb

Large diffs are not rendered by default.

60 changes: 30 additions & 30 deletions examples/multiple-comparison_examples.ipynb

Large diffs are not rendered by default.

134 changes: 65 additions & 69 deletions examples/unitroot_examples.ipynb

Large diffs are not rendered by default.

215 changes: 108 additions & 107 deletions examples/univariate_using_fixed_variance.ipynb

Large diffs are not rendered by default.

0 comments on commit 7cc4069

Please sign in to comment.