17 changes: 17 additions & 0 deletions neural_nets/activations/activations.py
@@ -149,3 +149,20 @@ def grad(self, x):
def grad2(self, x):
# 0 if x >= 0 else alpha * e^(x)
return np.where(x >= 0, np.zeros_like(x), self.alpha * np.exp(x))


class SoftPlus(ActivationBase):
def __init__(self):
super().__init__()

def __str__(self):
return "SoftPlus"

def fn(self, z):
# softplus(z) = log(1 + e^z)
return np.log(np.exp(z) + 1)

def grad(self, x):
# d/dx softplus(x) = e^x / (e^x + 1), i.e. sigmoid(x)
return np.exp(x) / (np.exp(x) + 1)

def grad2(self, x):
# d^2/dx^2 softplus(x) = e^x / (e^x + 1)^2, i.e. sigmoid(x) * (1 - sigmoid(x))
return np.exp(x) / ((np.exp(x) + 1) ** 2)
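A note on the forward pass: np.log(np.exp(z) + 1) overflows for large positive inputs (float64 np.exp overflows near z ≈ 710), and the np.exp(x) terms in grad and grad2 have the same limitation. The snippet below is only a sketch of a numerically stable alternative, not what this PR implements; the helper names softplus_stable, sigmoid_stable, and softplus_grad2_stable are illustrative. It relies on the identities softplus'(z) = sigmoid(z) and softplus''(z) = sigmoid(z) * (1 - sigmoid(z)).

import numpy as np

def softplus_stable(z):
    # log(1 + e^z) computed as logaddexp(0, z); e^z is never formed directly, so no overflow
    return np.logaddexp(0, z)

def sigmoid_stable(z):
    # softplus'(z) = sigmoid(z); exp is only ever applied to non-positive values
    e = np.exp(-np.abs(z))
    return np.where(z >= 0, 1.0 / (1.0 + e), e / (1.0 + e))

def softplus_grad2_stable(z):
    # softplus''(z) = sigmoid(z) * (1 - sigmoid(z))
    s = sigmoid_stable(z)
    return s * (1.0 - s)

z = np.array([-800.0, -1.0, 0.0, 1.0, 800.0])
print(softplus_stable(z))  # ≈ [0., 0.3133, 0.6931, 1.3133, 800.]
print(sigmoid_stable(z))   # ≈ [0., 0.2689, 0.5, 0.7311, 1.]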
6 changes: 3 additions & 3 deletions neural_nets/activations/plots.py
@@ -10,12 +10,12 @@
sns.set_style("white")
sns.set_context("notebook", font_scale=0.7)

from activations import Affine, ReLU, LeakyReLU, Tanh, Sigmoid, ELU
from activations import Affine, ReLU, LeakyReLU, Tanh, Sigmoid, ELU, SoftPlus


def plot_activations():
fig, axes = plt.subplots(2, 3, sharex=True, sharey=True)
fns = [Affine(), Tanh(), Sigmoid(), ReLU(), LeakyReLU(), ELU()]
fig, axes = plt.subplots(2, 4, sharex=True, sharey=True)
fns = [Affine(), Tanh(), Sigmoid(), ReLU(), LeakyReLU(), ELU(), SoftPlus()]
for ax, fn in zip(axes.flatten(), fns):
X = np.linspace(-3, 3, 100).astype(float).reshape(100, 1)
ax.plot(X, fn(X), label=r"$y$", alpha=0.7)
6 changes: 3 additions & 3 deletions neural_nets/activations/tests.py
@@ -10,12 +10,12 @@
sns.set_style("white")
sns.set_context("notebook", font_scale=0.7)

from activations import Affine, ReLU, LeakyReLU, Tanh, Sigmoid, ELU
from activations import Affine, ReLU, LeakyReLU, Tanh, Sigmoid, ELU, SoftPlus


def plot_activations():
fig, axes = plt.subplots(2, 3, sharex=True, sharey=True)
fns = [Affine(), Tanh(), Sigmoid(), ReLU(), LeakyReLU(), ELU()]
fig, axes = plt.subplots(2, 4, sharex=True, sharey=True)
fns = [Affine(), Tanh(), Sigmoid(), ReLU(), LeakyReLU(), ELU(), SoftPlus()]
for ax, fn in zip(axes.flatten(), fns):
X = np.linspace(-3, 3, 100).astype(float).reshape(100, 1)
ax.plot(X, fn(X), label=r"$y$", alpha=0.7)
40 changes: 40 additions & 0 deletions neural_nets/tests/tests.py
@@ -148,6 +148,11 @@ def test_activations(N=50):
test_elu_activation(N)
test_elu_grad(N)

print("Testing SoftPlus activation")
time.sleep(1)
test_softplus_activation(N)
test_softplus_grad(N)


def test_layers(N=50):
print("Testing FullyConnected layer")
@@ -526,6 +531,23 @@ def test_relu_activation(N=None):
i += 1


def test_softplus_activation(N=None):
from activations import SoftPlus

N = np.inf if N is None else N

mine = SoftPlus()
gold = lambda z: F.softplus(torch.FloatTensor(z)).numpy()

i = 0
while i < N:
n_dims = np.random.randint(1, 100)
z = random_stochastic_matrix(1, n_dims)
assert_almost_equal(mine.fn(z), gold(z))
print("PASSED")
i += 1


#######################################################################
# Activation Gradients #
#######################################################################
@@ -634,6 +656,24 @@ def test_softmax_grad(N=None):
i += 1


def test_softplus_grad(N=None):
from activations import SoftPlus

N = np.inf if N is None else N

mine = SoftPlus()
gold = torch_gradient_generator(F.softplus)

i = 0
while i < N:
n_ex = np.random.randint(1, 100)
n_dims = np.random.randint(1, 100)
z = random_tensor((n_ex, n_dims), standardize=True)
assert_almost_equal(mine.grad(z), gold(z))
print("PASSED")
i += 1


#######################################################################
# Layers #
#######################################################################