Skip to content

Commit

Permalink
Relax tensorflow version limit
Browse files Browse the repository at this point in the history
  • Loading branch information
kbattocchi committed Jan 28, 2021
1 parent f73adc8 commit 35470ff
Show file tree
Hide file tree
Showing 3 changed files with 28 additions and 4 deletions.
8 changes: 6 additions & 2 deletions econml/iv/nnet/_deepiv.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,9 +89,13 @@ def mog_loss_model(n_components, d_t):
# LL = C - log(sum(pi_i/sig^d * exp(-d2/(2*sig^2))))
# Use logsumexp for numeric stability:
# LL = C - log(sum(exp(-d2/(2*sig^2) + log(pi_i/sig^d))))
# TODO: does the numeric stability actually make any difference?
def make_logloss(d2, sig, pi):
    """Negative log-likelihood of a mixture of Gaussians with shared scalar sigma per component.

    Parameters
    ----------
    d2 : tensor, squared distances ||t - mu_i||^2 per mixture component (last axis indexes components)
    sig : tensor, per-component standard deviations
    pi : tensor, per-component mixture weights

    Returns
    -------
    tensor of -log(sum_i pi_i / sig_i^d_t * exp(-d2_i / (2 * sig_i^2))), reduced over the last axis.
    """
    # keras 2.4 removed K.logsumexp from the backend, so emulate it with the
    # standard max-shift trick for numeric stability:
    #   logsumexp(v) = log(sum(exp(v - m))) + m,  where m = max(v)
    values = -d2 / (2 * K.square(sig)) + K.log(pi / K.pow(sig, d_t))
    mx = K.max(values, axis=-1)
    # Reshape mx to (..., 1) so it broadcasts against values along the component axis.
    return -K.log(K.sum(K.exp(values - L.Reshape((-1, 1))(mx)), axis=-1)) - mx

ll = L.Lambda(lambda dsp: make_logloss(*dsp), output_shape=(1,))([d2, sig, pi])

Expand Down
20 changes: 20 additions & 0 deletions econml/tests/test_deepiv.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,26 @@ def test_stop_grad(self):
model.compile('nadam')
model.fit([np.array([[1]]), np.array([[2]]), np.array([[0]])], [])

def test_mog_loss(self):
    """Check the mixture-of-Gaussians loss layer against a direct NumPy computation."""
    # Inputs: pi (3,) mixture weights, mu (3, 2) component means,
    # sig (3,) component std devs, t (2,) observed treatment.
    shapes = [(3,), (3, 2), (3,), (2,)]
    inputs = [keras.layers.Input(shape=shape) for shape in shapes]
    ll_model = keras.engine.Model(inputs, mog_loss_model(3, 2)(inputs))

    for _ in range(10):
        raw = -np.log(np.random.uniform(size=(3,)))
        pi = raw / np.sum(raw)  # random point on the probability simplex
        mu = np.random.normal(size=(3, 2))
        sig = np.exp(np.random.normal(size=(3,)))
        t = np.random.normal(size=(2,))

        batch = [pi.reshape(1, 3), mu.reshape(1, 3, 2), sig.reshape(1, 3), t.reshape(1, 2)]
        pred = ll_model.predict(batch)

        # Reference: LL = -log(sum_i pi_i / sig_i^d * exp(-d2_i/(2*sig_i^2))), with d = 2.
        diff = mu - t.reshape(-1, 2)
        d2 = np.sum(diff * diff, axis=-1)
        expected = -np.log(np.sum(pi / (sig * sig) * np.exp(-d2 / (2 * sig * sig)), axis=0))

        assert np.allclose(expected, pred[0])

@pytest.mark.slow
def test_deepiv_shape(self):
fit_opts = {"epochs": 2}
Expand Down
4 changes: 2 additions & 2 deletions setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -43,9 +43,9 @@ install_requires =
numpy
scipy > 1.4.0
scikit-learn >= 0.24
keras < 2.4
keras
sparse
tensorflow > 1.10, < 2.3
tensorflow > 1.10
joblib >= 0.13.0
numba != 0.42.1
statsmodels >= 0.9
Expand Down

0 comments on commit 35470ff

Please sign in to comment.