Skip to content

Commit

Permalink
Removed Bias Terms from L2 Loss
Browse files — browse the repository at this point in the history
  • Loading branch information
SuyashLakhotia committed Mar 13, 2018
1 parent 83fcc47 commit f91fb23
Show file tree
Hide file tree
Showing 4 changed files with 0 additions and 8 deletions.
2 changes: 0 additions & 2 deletions cnn_fchollet.py
Expand Up @@ -78,7 +78,6 @@ def __init__(self, sequence_length, num_classes, vocab_size, embedding_size, emb
b = tf.Variable(tf.constant(0.1, shape=[num_units]), name="b")

l2_loss += tf.nn.l2_loss(W)
- l2_loss += tf.nn.l2_loss(b)

self.x = tf.nn.xw_plus_b(self.x, W, b)
self.x = tf.nn.relu(self.x)
Expand All @@ -92,7 +91,6 @@ def __init__(self, sequence_length, num_classes, vocab_size, embedding_size, emb
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")

l2_loss += tf.nn.l2_loss(W)
- l2_loss += tf.nn.l2_loss(b)

self.scores = tf.nn.xw_plus_b(self.x, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
Expand Down
2 changes: 0 additions & 2 deletions cnn_ykim.py
Expand Up @@ -72,7 +72,6 @@ def __init__(self, sequence_length, num_classes, vocab_size, embedding_size, emb
b = tf.Variable(tf.constant(0.1, shape=[num_units]), name="b")

l2_loss += tf.nn.l2_loss(W)
- l2_loss += tf.nn.l2_loss(b)

self.x = tf.nn.xw_plus_b(self.x, W, b)
self.x = tf.nn.relu(self.x)
Expand All @@ -86,7 +85,6 @@ def __init__(self, sequence_length, num_classes, vocab_size, embedding_size, emb
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")

l2_loss += tf.nn.l2_loss(W)
- l2_loss += tf.nn.l2_loss(b)

self.scores = tf.nn.xw_plus_b(self.x, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
Expand Down
2 changes: 0 additions & 2 deletions graph_cnn.py
Expand Up @@ -89,7 +89,6 @@ def __init__(self, filter_name, L, K, F, P, FC, batch_size, num_vertices, num_cl
b = tf.Variable(tf.constant(0.1, shape=[num_units]), name="b")

l2_loss += tf.nn.l2_loss(W)
- l2_loss += tf.nn.l2_loss(b)

x = tf.nn.xw_plus_b(x, W, b)
x = tf.layers.batch_normalization(x, training=self.train_flag)
Expand All @@ -104,7 +103,6 @@ def __init__(self, filter_name, L, K, F, P, FC, batch_size, num_vertices, num_cl
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")

l2_loss += tf.nn.l2_loss(W)
- l2_loss += tf.nn.l2_loss(b)

self.scores = tf.nn.xw_plus_b(x, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
Expand Down
2 changes: 0 additions & 2 deletions mlp.py
Expand Up @@ -26,7 +26,6 @@ def __init__(self, vocab_size, num_classes, layers, l2_reg_lambda):
b = tf.Variable(tf.constant(0.1, shape=[num_units]), name="b")

l2_loss += tf.nn.l2_loss(W)
- l2_loss += tf.nn.l2_loss(b)

x = tf.nn.xw_plus_b(x, W, b)
x = tf.nn.relu(x)
Expand All @@ -40,7 +39,6 @@ def __init__(self, vocab_size, num_classes, layers, l2_reg_lambda):
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")

l2_loss += tf.nn.l2_loss(W)
- l2_loss += tf.nn.l2_loss(b)

self.scores = tf.nn.xw_plus_b(x, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
Expand Down

0 comments on commit f91fb23

Please sign in to comment.