TST: Additional silence
Silence more tests
bashtage committed Nov 20, 2017
1 parent ad8cb9f commit 54944d9
Showing 8 changed files with 29 additions and 25 deletions.
4 changes: 2 additions & 2 deletions statsmodels/base/tests/test_shrink_pickle.py
@@ -258,7 +258,7 @@ def setup(self):
 class TestPickleFormula4(TestPickleFormula2):
 
     def setup(self):
-        self.results = sm.OLS.from_formula("Y ~ np.log(abs(A)+1) + B * C", data=self.data).fit()
+        self.results = sm.OLS.from_formula("Y ~ np.log(abs(A) + 1) + B * C", data=self.data).fit()
 
 # we need log in module namespace for the following test
 from numpy import log
@@ -267,7 +267,7 @@ class TestPickleFormula5(TestPickleFormula2):
     def setup(self):
         # if we import here, then unpickling fails -> exception in test
         #from numpy import log
-        self.results = sm.OLS.from_formula("Y ~ log(abs(A)+1) + B * C", data=self.data).fit()
+        self.results = sm.OLS.from_formula("Y ~ log(abs(A) + 1) + B * C", data=self.data).fit()
 
 
 class TestRemoveDataPicklePoissonRegularized(RemoveDataPickle):
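
Both hunks in this file only add whitespace inside the formula term, np.log(abs(A)+1) becoming np.log(abs(A) + 1); the fitted model is unchanged. A minimal sketch of the pattern these tests exercise, assuming statsmodels, pandas and numpy are available and using made-up data in place of the test fixture:

import numpy as np
import pandas as pd
import statsmodels.api as sm

# log must live in the module namespace so that a formula written as
# "log(...)" can be re-evaluated after a pickled results instance is loaded
from numpy import log

rng = np.random.RandomState(12345)
data = pd.DataFrame({"A": rng.standard_normal(100),
                     "B": rng.standard_normal(100),
                     "C": rng.standard_normal(100)})
data["Y"] = data["A"] + data["B"] * data["C"] + rng.standard_normal(100)

# either the fully qualified np.log or the module-level log works here
results = sm.OLS.from_formula("Y ~ log(abs(A) + 1) + B * C", data=data).fit()
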
3 changes: 2 additions & 1 deletion statsmodels/discrete/tests/test_discrete.py
@@ -1585,7 +1585,8 @@ def test_formula_missing_exposure():
     assert_(type(mod1.exposure) is np.ndarray, msg='Exposure is not ndarray')
 
     # make sure this raises
-    exposure = pd.Series(np.random.chisquare(5, 5))
+    exposure = pd.Series(np.random.uniform(size=5))
+    df.loc[3, 'Bar'] = 4 # nan not relevant for ValueError for shape mismatch
     assert_raises(ValueError, sm.Poisson, df.Foo, df[['constant', 'Bar']],
                   exposure=exposure)
 
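
np.random.uniform(size=5) still yields an exposure series of the wrong length, which is all the assert_raises check needs, and the added df.loc[3, 'Bar'] = 4 line makes explicit that the expected ValueError comes from the shape mismatch rather than from a NaN in the design. A minimal sketch of that check, assuming statsmodels and pandas are installed and using a toy four-row frame in place of the test's df:

import numpy as np
import pandas as pd
import statsmodels.api as sm
from numpy.testing import assert_raises

# four observations, but a five-element exposure
df = pd.DataFrame({'Foo': [1.0, 2.0, 10.0, 149.0],
                   'Bar': [1.0, 2.0, 3.0, 4.0],
                   'constant': [1] * 4})
exposure = pd.Series(np.random.uniform(size=5))

# model construction must fail because exposure and endog lengths differ
assert_raises(ValueError, sm.Poisson, df.Foo, df[['constant', 'Bar']],
              exposure=exposure)
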
4 changes: 2 additions & 2 deletions statsmodels/duration/survfunc.py
@@ -71,9 +71,9 @@ def _calc_survfunc_right(time, status, weights=None, entry=None, compress=True,
         se[(n == d) | (n == 0)] = np.nan
         se = np.cumsum(se)
         se = np.sqrt(se)
-        locs = np.logical_not(np.isinf(se) & (sp == 0))
+        locs = np.isfinite(se) | (sp != 0)
         se[locs] *= sp[locs]
-        se[np.logical_not(locs)] = np.nan
+        se[~locs] = np.nan
     else:
         # Tsiatis' (1981) formula
         se = d / (n * n).astype(np.float64)
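
The new mask is De Morgan's law applied to the old one: not (isinf(se) and sp == 0) is the same as isfinite(se) or sp != 0 whenever se contains no NaN, and where se is NaN the final standard error ends up NaN under either mask; ~locs is simply the element-wise complement that np.logical_not(locs) computed. A numpy-only sketch of the equivalence, with invented se and sp values:

import numpy as np

se = np.array([0.1, np.inf, 0.3, np.inf])
sp = np.array([0.9, 0.0, 0.0, 0.5])

old_locs = np.logical_not(np.isinf(se) & (sp == 0))
new_locs = np.isfinite(se) | (sp != 0)

assert np.array_equal(old_locs, new_locs)  # identical masks for finite/inf se
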
6 changes: 3 additions & 3 deletions statsmodels/nonparametric/tests/test_kernels.py
@@ -20,9 +20,9 @@
 
 y = results['accident']
 x = results['service']
-use_mask = x >= 0
-x = np.log(x[use_mask].copy())
-y = y[use_mask]
+positive = x >= 0
+x = np.log(x[positive])
+y = y[positive]
 xg = np.linspace(x.min(), x.max(), 40) # grid points default in Stata
 
 #kern_name = 'gau'
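
Filtering with the boolean mask before calling np.log keeps negative service times out of the logarithm, so the call cannot emit an invalid-value RuntimeWarning, and dropping .copy() is harmless because boolean indexing already returns a new array. A numpy-only sketch of the pattern, with made-up data standing in for the Stata results file:

import numpy as np

service = np.array([-2.0, 0.5, 1.0, 4.0, 10.0])
accident = np.array([3.0, 1.0, 0.0, 2.0, 5.0])

positive = service >= 0          # True where the log is defined
x = np.log(service[positive])    # negative entries never reach np.log
y = accident[positive]
xg = np.linspace(x.min(), x.max(), 40)   # 40-point evaluation grid
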
9 changes: 3 additions & 6 deletions statsmodels/stats/contrast.py
@@ -55,12 +55,9 @@ def __init__(self, t=None, F=None, sd=None, effect=None, df_denom=None,
             self.pvalue = self.dist.sf(self.statistic, df_denom)
         else:
             "normal"
-            # TODO: Need a simpler method to handle NaNs
-            pvalue = np.empty_like(value)
-            pvalue.fill(np.nan)
-            not_nan = np.logical_not(np.isnan(value))
-            pvalue[not_nan] = self.dist.sf(np.abs(value[not_nan])) * 2
-            self.pvalue = pvalue
+            self.pvalue = np.full_like(value, np.nan)
+            not_nan = ~np.isnan(value)
+            self.pvalue[not_nan] = self.dist.sf(np.abs(value[not_nan])) * 2
 
     # cleanup
     # should we return python scalar?
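
np.full_like(value, np.nan) is the one-call replacement for np.empty_like followed by fill(np.nan), and ~np.isnan(value) replaces np.logical_not(np.isnan(value)); the two-sided p-value computation itself is unchanged. A small sketch of the pattern with a standard normal reference distribution, assuming scipy is available and using an invented vector of test statistics:

import numpy as np
from scipy import stats

value = np.array([1.96, np.nan, -2.5, 0.3])   # invented statistics, one NaN
dist = stats.norm

pvalue = np.full_like(value, np.nan)          # NaN everywhere to start
not_nan = ~np.isnan(value)                    # entries with a usable statistic
pvalue[not_nan] = dist.sf(np.abs(value[not_nan])) * 2   # two-sided p-values
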
18 changes: 12 additions & 6 deletions statsmodels/stats/power.py
@@ -269,12 +269,18 @@ def func(x):
             success = 1
         else:
             # try backup
-            #TODO: check more cases to make this robust
-            val, infodict, ier, msg = optimize.fsolve(func, start_value,
-                                                      full_output=True) #scalar
-            #val = optimize.newton(func, start_value) #scalar
-            fval = infodict['fvec']
-            fit_res.append(infodict)
+            # TODO: check more cases to make this robust
+            if not np.isnan(start_value):
+                val, infodict, ier, msg = optimize.fsolve(func, start_value,
+                                                          full_output=True) #scalar
+                #val = optimize.newton(func, start_value) #scalar
+                fval = infodict['fvec']
+                fit_res.append(infodict)
+            else:
+                ier = -1
+                fval = 1
+                fit_res.append([None])
 
             if ier == 1 and np.abs(fval) < 1e-4 :
                 success = 1
             else:
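
The guard skips the backup optimize.fsolve call when the starting value is NaN: the solver cannot converge from a NaN start and, given the commit's goal of silencing test output, presumably only produced warnings along the way, so the attempt is recorded as a failure via ier = -1 and the convergence check below falls through. A stripped-down sketch of the control flow, assuming scipy and substituting a toy func for the power equation:

import numpy as np
from scipy import optimize

def func(x):
    # toy stand-in for the power equation whose root is sought
    return x ** 2 - 2.0

start_value = np.nan    # e.g. an earlier bracketing step failed
fit_res = []

if not np.isnan(start_value):
    val, infodict, ier, msg = optimize.fsolve(func, start_value,
                                              full_output=True)
    fval = infodict['fvec']
    fit_res.append(infodict)
else:
    ier = -1            # mark the backup solver as not attempted
    fval = 1
    fit_res.append([None])

success = 1 if ier == 1 and np.abs(fval) < 1e-4 else 0
print(success)          # 0: no root search was run from a NaN start
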
2 changes: 1 addition & 1 deletion statsmodels/stats/tests/test_tost.py
@@ -478,7 +478,7 @@ def test_tost_asym():
 
     #SMOKE tests: foe multi-endpoint vectorized, k on k
     resall = smws.ttost_ind(clinic[15:, 2:7], clinic[:15, 2:7],
-                            [-1.0, -1.0, -1.5, -1.5, -1.5], 0.6,
+                            np.exp([-1.0, -1.0, -1.5, -1.5, -1.5]), 0.6,
                             usevar='unequal', transform=np.log)
     resall = smws.ttost_ind(clinic[15:, 2:7], clinic[:15, 2:7],
                             [-1.0, -1.0, -1.5, -1.5, -1.5], 0.6,
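
Only the first of the two smoke-test calls changes. As the edit suggests, ttost_ind with transform=np.log applies the log transform to the equivalence margins as well as to the data, so log-scale margins now have to be passed as np.exp([...]); the old call handed negative numbers straight to np.log, producing NaN margins plus an invalid-value warning, whereas the new call recovers the same margins after the transform. A numpy-only sketch of that arithmetic:

import numpy as np

# equivalence margins intended on the log scale
log_margins = np.array([-1.0, -1.0, -1.5, -1.5, -1.5])

np.log(log_margins)                        # old call: NaN margins plus warning
recovered = np.log(np.exp(log_margins))    # new call: margins survive the log
assert np.allclose(recovered, log_margins)
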
8 changes: 4 additions & 4 deletions statsmodels/tools/tools.py
@@ -364,7 +364,7 @@ def recipr(x):
     x = np.asarray(x)
     out = np.zeros_like(x, dtype=np.float64)
     nans = np.isnan(x.flat)
-    pos = np.logical_not(nans)
+    pos = ~nans
     pos[pos] = pos[pos] & (x.flat[pos] > 0)
     out.flat[pos] = 1.0 / x.flat[pos]
     out.flat[nans] = np.nan
@@ -380,9 +380,9 @@ def recipr0(x):
     x = np.asarray(x)
     out = np.zeros_like(x, dtype=np.float64)
     nans = np.isnan(x.flat)
-    not_zero = np.logical_not(nans)
-    not_zero[not_zero] = not_zero[not_zero] & (x.flat[not_zero] != 0)
-    out.flat[not_zero] = 1.0 / x.flat[not_zero]
+    non_zero = ~nans
+    non_zero[non_zero] = non_zero[non_zero] & (x.flat[non_zero] != 0)
+    out.flat[non_zero] = 1.0 / x.flat[non_zero]
     out.flat[nans] = np.nan
     return out
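
In both helpers the only change is writing the boolean complement as ~nans instead of np.logical_not(nans) and renaming the mask; the behaviour, reciprocal where the input is usable, 0 where it is zero (or non-positive in recipr), NaN where it is NaN, is untouched. A self-contained sketch that reproduces the updated recipr0 and shows the expected output, assuming only numpy:

import numpy as np

def recipr0(x):
    """Element-wise 1/x that maps 0 -> 0 and NaN -> NaN."""
    x = np.asarray(x)
    out = np.zeros_like(x, dtype=np.float64)
    nans = np.isnan(x.flat)
    non_zero = ~nans
    non_zero[non_zero] = non_zero[non_zero] & (x.flat[non_zero] != 0)
    out.flat[non_zero] = 1.0 / x.flat[non_zero]
    out.flat[nans] = np.nan
    return out

print(recipr0(np.array([2.0, 0.0, -4.0, np.nan])))   # [ 0.5   0.   -0.25   nan]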
