Commit 35b9c3c

bashtage committed Feb 13, 2020
1 parent 0481c2b commit 35b9c3c

Showing 10 changed files with 72 additions and 44 deletions.
16 changes: 8 additions & 8 deletions arch/tests/unitroot/test_unitroot.py
@@ -70,7 +70,7 @@ def test_adf_no_lags(self):
        assert_almost_equal(adf, -6.56880, DECIMAL_4)

    def test_adf_nc_no_lags(self):
-        adf = ADF(self.inflation, trend="nc", lags=0)
+        adf = ADF(self.inflation, trend="n", lags=0)
        assert_almost_equal(adf.stat, -3.88845, DECIMAL_4)
        # 16.239
@@ -117,9 +117,9 @@ def test_adf_auto_t_stat(self):
        adf.trend = "ctt"
        assert adf.stat != old_stat
        assert adf.trend == "ctt"
-        assert len(adf.valid_trends) == len(("nc", "c", "ct", "ctt"))
+        assert len(adf.valid_trends) == len(("n", "c", "ct", "ctt"))
        for d in adf.valid_trends:
-            assert d in ("nc", "c", "ct", "ctt")
+            assert d in ("n", "c", "ct", "ctt")
        assert adf.null_hypothesis == "The process contains a unit root."
        assert adf.alternative_hypothesis == "The process is weakly " "stationary."
@@ -187,7 +187,7 @@ def test_dfgls_auto(self):
    def test_dfgls_bad_trend(self):
        dfgls = DFGLS(self.inflation, trend="ct", method="BIC", max_lags=3)
        with pytest.raises(ValueError):
-            dfgls.trend = "nc"
+            dfgls.trend = "n"

        assert dfgls != 0.0
@@ -270,7 +270,7 @@ def test_variance_ratio_non_robust(self):

    def test_variance_ratio_no_constant(self):
        y = self.rng.standard_normal(100)
-        vr = VarianceRatio(y, trend="nc", debiased=False)
+        vr = VarianceRatio(y, trend="n", debiased=False)
        dy = np.diff(y)
        mu = 0.0
        dy2 = y[2:] - y[:-2]
@@ -390,7 +390,7 @@ def test_tstat_exogenous(self):
        assert np.max(np.argwhere(np.abs(direct[2:]) > crit)) == sel_lag


-@pytest.mark.parametrize("trend", ["nc", "c", "ct", "ctt"])
+@pytest.mark.parametrize("trend", ["n", "c", "ct", "ctt"])
def test_trends_low_memory(trend):
    rnd = np.random.RandomState(12345)
    y = np.cumsum(rnd.standard_normal(250))
@@ -404,13 +404,13 @@ def test_trends_low_memory(trend):
    assert_equal(adf.max_lags, 1)


-@pytest.mark.parametrize("trend", ["nc", "c", "ct", "ctt"])
+@pytest.mark.parametrize("trend", ["n", "c", "ct", "ctt"])
def test_representations(trend):
    rnd = np.random.RandomState(12345)
    y = np.cumsum(rnd.standard_normal(250))
    adf = ADF(y, trend=trend, max_lags=16)
    check = "Constant"
-    if trend == "nc":
+    if trend == "n":
        check = "No Trend"
    assert check in adf.__repr__()
    assert check in adf.__repr__()
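The user-facing effect of the renames exercised in this test file: the "no deterministic terms" trend option is now spelled "n" rather than "nc". A minimal sketch of the new spelling, with illustrative data (not taken from the test suite):

import numpy as np
from arch.unitroot import ADF

y = np.cumsum(np.random.default_rng(0).standard_normal(250))
adf = ADF(y, trend="n")  # spelled trend="nc" before this commit
print(adf.stat, adf.pvalue)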
29 changes: 24 additions & 5 deletions arch/unitroot/cointegration.py
@@ -9,6 +9,7 @@
from statsmodels.tsa.tsatools import add_trend

from arch.typing import ArrayLike1D, ArrayLike2D
+from arch.unitroot.critical_values.engle_granger import EngleGrangerCV
from arch.utility.array import ensure1d, ensure2d


@@ -137,8 +138,6 @@ def result(self):
def _cross_section(y, x, trend):
    if trend not in ("n", "c", "ct", "t"):
        raise ValueError('trend must be one of "n", "c", "ct" or "t"')
-    y = np.asarray(ensure1d(y, "x", False))
-    x = np.asarray(ensure2d(x, "x"))
    x = add_trend(x, trend)
    res = OLS(y, x).fit()
    return res.resid
@@ -154,11 +153,17 @@ def engle_granger(
    method: str = "aic",
    df_adjust: Union[bool, int] = True,
):
-
+    y = np.asarray(ensure1d(y, "x", False))
+    x = np.asarray(ensure2d(x, "x"))
    resid = _cross_section(y, x, trend)
    from arch.unitroot.unitroot import ADF

-    ADF(resid, lags, trend="n", max_lags=max_lags, method=method)
-    pass
+    adf = ADF(resid, lags, trend="n", max_lags=max_lags, method=method)
+    # TODO: pvalue, crit val method need to be better
+    eg_cv = EngleGrangerCV()
+    cv = pd.Series({p: eg_cv[trend, p, x.shape[1] + 1] for p in (10, 5, 1)})
+    return CointegrationTestResult(adf.stat, adf.pvalue, cv, "Engle-Granger Test")


def phillips_ouliaris(y, x, trend="c", lags=None, df_adjust=True):
@@ -177,25 +182,39 @@ def __init__(
        self._null = "No Cointegration"
        self._alternative = "Cointegration"

    @property
    def stat(self) -> float:
        return self._stat

    @property
    def pvalue(self) -> float:
        return self._pvalue

+    @property
+    def crit_vals(self) -> pd.Series:
+        return self._crit_vals
+
    @property
    def null(self) -> str:
        return self._null

    @property
    def alternative(self) -> str:
        return self._alternative

    def __str__(self) -> str:
-        out = f"{self._name}\nStatistic:{self._stat}\nP-value:{self.pvalue()}"
+        out = f"{self._name}\nStatistic:{self._stat}\nP-value:{self.pvalue}"
        out += f"\nNull: {self._null}, Alternative: {self._alternative}"
+        cv_str = ", ".join([f"{k}%: {v}" for k, v in self.crit_vals.items()])
+        out += f"\nCrit. Vals: {cv_str}"
        return out

    def __repr__(self):
        return self.__str__() + f"\nID: {hex(id(self))}"


+g = np.random.default_rng(0)
+y = g.standard_normal((500, 1))
+y = np.cumsum(y, 0)
+x = y + g.standard_normal((500, 1))
+print(engle_granger(y, x))
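For orientation, engle_granger above wires up the classic two-step Engle-Granger procedure: regress y on x plus deterministic terms, then run an ADF test on the residuals; cointegration implies the residuals are stationary. A minimal standalone sketch of the same logic (illustrative only; it reports the plain ADF statistic rather than the EngleGrangerCV critical values the commit uses):

import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.tsatools import add_trend

from arch.unitroot import ADF

rng = np.random.default_rng(0)
x = np.cumsum(rng.standard_normal(500))  # an I(1) regressor
y = 0.5 * x + rng.standard_normal(500)   # cointegrated with x

# Step 1: cross-sectional regression, as in _cross_section above
resid = OLS(y, add_trend(x[:, None], "c")).fit().resid
# Step 2: ADF on the residuals with no deterministics (removed in step 1)
adf = ADF(resid, trend="n")
print(adf.stat)  # compare to Engle-Granger critical values, not plain ADF ones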
22 changes: 11 additions & 11 deletions arch/unitroot/critical_values/dickey_fuller.py
@@ -16,15 +16,15 @@
tau_small_p = {}
tau_large_p = {}

-tau_small_p["nc"] = [
+tau_small_p["n"] = [
    [0.6344, 1.2378, 3.2496],
    [1.9129, 1.3857, 3.5322],
    [2.7648, 1.4502, 3.4186],
    [3.4336, 1.4835, 3.1900],
    [4.0999, 1.5533, 3.5900],
    [4.5388, 1.5344, 2.9807],
]
-tau_small_p["nc"] = asarray(tau_small_p["nc"]) * small_scaling
+tau_small_p["n"] = asarray(tau_small_p["n"]) * small_scaling

tau_small_p["c"] = [
    [2.1659, 1.4412, 3.8269],
@@ -57,15 +57,15 @@
tau_small_p["ctt"] = asarray(tau_small_p["ctt"]) * small_scaling

large_scaling = asarray([1, 1e-1, 1e-1, 1e-2])
-tau_large_p["nc"] = [
+tau_large_p["n"] = [
    [0.4797, 9.3557, -0.6999, 3.3066],
    [1.5578, 8.5580, -2.0830, -3.3549],
    [2.2268, 6.8093, -3.2362, -5.4448],
    [2.7654, 6.4502, -3.0811, -4.4946],
    [3.2684, 6.8051, -2.6778, -3.4972],
    [3.7268, 7.1670, -2.3648, -2.8288],
]
-tau_large_p["nc"] = asarray(tau_large_p["nc"]) * large_scaling
+tau_large_p["n"] = asarray(tau_large_p["n"]) * large_scaling

tau_large_p["c"] = [
    [1.7339, 9.3202, -1.2745, -1.0368],
@@ -105,7 +105,7 @@
# noinspection PyDictCreation
tau_2010: Dict[str, NDArray] = {}

-tau_2010["nc"] = array(
+tau_2010["n"] = array(
    [
        [
            [-2.56574, -2.2358, -3.627, 0],  # N = 1
@@ -314,21 +314,21 @@
# These are the cut-off values for the left-tail vs. the rest of the
# tau distribution, for getting the p-values
tau_star = {
-    "nc": [-1.04, -1.53, -2.68, -3.09, -3.07, -3.77],
+    "n": [-1.04, -1.53, -2.68, -3.09, -3.07, -3.77],
    "c": [-1.61, -2.62, -3.13, -3.47, -3.78, -3.93],
    "ct": [-2.89, -3.19, -3.50, -3.65, -3.80, -4.36],
    "ctt": [-3.21, -3.51, -3.81, -3.83, -4.12, -4.63],
}

tau_min = {
-    "nc": [-19.04, -19.62, -21.21, -23.25, -21.63, -25.74],
+    "n": [-19.04, -19.62, -21.21, -23.25, -21.63, -25.74],
    "c": [-18.83, -18.86, -23.48, -28.07, -25.96, -23.27],
    "ct": [-16.18, -21.15, -25.37, -26.63, -26.53, -26.18],
    "ctt": [-17.17, -21.1, -24.33, -24.03, -24.33, -28.22],
}

tau_max = {
-    "nc": [inf, 1.51, 0.86, 0.88, 1.05, 1.24],
+    "n": [inf, 1.51, 0.86, 0.88, 1.05, 1.24],
    "c": [2.74, 0.92, 0.55, 0.61, 0.79, 1],
    "ct": [0.7, 0.63, 0.71, 0.93, 1.19, 1.42],
    "ctt": [0.54, 0.79, 1.08, 1.43, 3.49, 1.92],
@@ -342,7 +342,7 @@
adf_z_star = {"c": -7.96, "ct": -13.46, "ctt": -16.27}

adf_z_small_p = {
-    "nc": [0.0342, -0.6376, 0, -0.03872],
+    "n": [0.0342, -0.6376, 0, -0.03872],
    "c": [2.2142, -1.7863, 0.3283, -0.07727],
    "ct": [4.6476, -2.8932, 0.5832, -0.09990],
    "ctt": [4.4599, -1.8635, 0.2126, -0.06070],
@@ -352,7 +352,7 @@
# the approximation function is
# p = norm.cdf(d_0 + d_1 * z + d_2*z**2 + d_3*z**3 + d_4*z**4)
adf_z_large_p = {
-    "nc": [0.4927, 6.906, 13.2331, 12.099, 0],
+    "n": [0.4927, 6.906, 13.2331, 12.099, 0],
    "c": [1.7157, 0.5536, 4.5518, 2.2466, 4.2537],
    "ct": [2.7119, 0.4594, 2.3747, 0.7488, 0.9333],
    "ctt": [3.4216, 0.4170, 1.6939, 0.4203, 0.4153],
@@ -377,7 +377,7 @@
            [-11.25012522, 38.19512941, -111.45098086, 226.91220644],
        ]
    ),
-    "nc": asarray(
+    "n": asarray(
        [
            [-13.2907657, 323.225921, -8318.89213, 79857.3965],
            [-7.81966697, 217.366594, -5554.92749, 52618.7129],
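The comment above adf_z_large_p spells out how these coefficient tables map a test statistic to a p-value. A hedged sketch of that mapping (illustrative; the real lookup in arch.unitroot also selects the appropriate table and handles edge cases):

from scipy.stats import norm

def approx_pvalue(stat, coeffs):
    # p = norm.cdf(d_0 + d_1*z + d_2*z**2 + ...), per the comment above;
    # coeffs holds d_0..d_k for one trend specification.
    return norm.cdf(sum(d * stat ** i for i, d in enumerate(coeffs)))

The *_star cut-offs choose between the small-p and large-p tables, while *_min and *_max bound the region in which the polynomial approximation is trusted.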
1 change: 1 addition & 0 deletions arch/unitroot/critical_values/engle_granger.py
@@ -182,6 +182,7 @@


class EngleGrangerCV(object):
+    # TODO: Improve to provide CVs directly as a function of T
    def __getitem__(self, item):
        # item ['nc',10,3]
        if (
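The key layout can be read off the call site added in cointegration.py above: (trend code, significance level in percent, number of variables), where engle_granger passes x.shape[1] + 1, i.e. the regressors plus the dependent series. A hedged sketch of the lookup; the exact trend codes and table coverage here are assumptions:

from arch.unitroot.critical_values.engle_granger import EngleGrangerCV

eg_cv = EngleGrangerCV()
cv_5pct = eg_cv["c", 5, 2]  # 5% critical value, constant trend, two series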
@@ -61,7 +61,7 @@ def wrapper(n, trend, b, seed=0):
dview["adf_simulation"] = adf_simulation
lview = rc.load_balanced_view()

-trends = ("nc", "c", "ct", "ctt")
+trends = ("n", "c", "ct", "ctt")
T = array(
    (
        20,
@@ -51,7 +51,7 @@ def wrapper(n, trend, b, seed=0):


if __name__ == "__main__":
-    trends = ("nc", "c", "ct", "ctt")
+    trends = ("n", "c", "ct", "ctt")
    T = array(
        (
            20,
@@ -79,7 +79,7 @@ def wrapper(n, trend, b, rng_seed=0):
dview["adf_simulation"] = adf_simulation
lview = rc.load_balanced_view()

-trends = ("nc", "c", "ct", "ctt")
+trends = ("n", "c", "ct", "ctt")
T = array(
    (
        20,
@@ -1,7 +1,7 @@
import numpy as np
from statsmodels.regression.linear_model import OLS

-trends = ("nc", "c", "ct", "ctt")
+trends = ("n", "c", "ct", "ctt")
critical_values = (1.0, 5.0, 10.0)
adf_z_cv_approx = {}
for t in trends:
@@ -9,10 +9,11 @@
from random import shuffle
from typing import List

-from joblib import Parallel, cpu_count, delayed
import numpy as np
from numpy.random import PCG64, Generator, SeedSequence

+from joblib import Parallel, cpu_count, delayed
+
ROOT = os.path.join(os.path.split(os.path.abspath(__file__))[0], "engle-granger")
if not os.path.exists(ROOT):
    os.mkdir(ROOT)
