Changes to loss function
MichSchli committed Mar 23, 2017
1 parent 4363e6e commit 0fa6c4c
Showing 7 changed files with 248 additions and 244 deletions.
460 changes: 226 additions & 234 deletions .idea/workspace.xml

Large diffs are not rendered by default.

4 changes: 3 additions & 1 deletion code/decoders/bilinear_diag.py
@@ -29,7 +29,9 @@ def get_loss(self, mode='train'):
         e1s, rs, e2s = self.compute_codes(mode=mode)
 
         energies = tf.reduce_sum(e1s * rs * e2s, 1)
-        return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(energies, self.Y))
+
+        weight = int(self.settings['NegativeSampleRate'])
+        return tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(energies, self.Y, weight))
 
     def local_initialize_train(self):
         self.Y = tf.placeholder(tf.float32, shape=[None])
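The DistMult (bilinear-diagonal) decoder swaps the plain sigmoid cross-entropy for its weighted variant, with NegativeSampleRate from the settings file used as the positive-class weight to offset sampling several corrupted triples per true one. For reference, tf.nn.weighted_cross_entropy_with_logits computes pos_weight * targets * -log(sigmoid(logits)) + (1 - targets) * -log(1 - sigmoid(logits)); below is a minimal NumPy sketch of that formula (the helper name is ours, not the repository's):

    import numpy as np

    def weighted_sigmoid_xent(logits, targets, pos_weight):
        # Numerically stable evaluation of
        #   pos_weight * targets * -log(sigmoid(x))
        #   + (1 - targets) * -log(1 - sigmoid(x))
        log_sig = -np.logaddexp(0.0, -logits)     # log(sigmoid(x))
        log_not_sig = -np.logaddexp(0.0, logits)  # log(1 - sigmoid(x))
        return -(pos_weight * targets * log_sig + (1.0 - targets) * log_not_sig)

With pos_weight set to the negative sample rate, the positives and the sampled negatives contribute roughly equally to the mean loss.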
3 changes: 2 additions & 1 deletion code/decoders/complex.py
@@ -41,7 +41,8 @@ def get_loss(self, mode='train'):
                  + tf.reduce_sum(e1s_r * rs_i * e2s_i, 1) \
                  - tf.reduce_sum(e1s_i * rs_i * e2s_r, 1)
 
-        return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(energies, self.Y))
+        weight = int(self.settings['NegativeSampleRate'])
+        return tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(energies, self.Y, weight))
 
     def local_initialize_train(self):
         self.Y = tf.placeholder(tf.float32, shape=[None])
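The ComplEx decoder receives the identical loss change. The continuation lines visible in the hunk are the final terms of the ComplEx score Re(<e1, r, conj(e2)>) from Trouillon et al. (2016); here is a NumPy sketch of the full score, with the first two terms reconstructed on the assumption that the code follows the published formula:

    import numpy as np

    def complex_score(e1_r, e1_i, r_r, r_i, e2_r, e2_i):
        # Real part of the trilinear product <e1, r, conj(e2)> over complex
        # embeddings, with real (_r) and imaginary (_i) parts kept separate.
        return (e1_r * r_r * e2_r).sum(1) \
             + (e1_i * r_r * e2_i).sum(1) \
             + (e1_r * r_i * e2_i).sum(1) \
             - (e1_i * r_i * e2_r).sum(1)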
9 changes: 8 additions & 1 deletion code/encoders/message_gcns/gcn_basis.py
@@ -85,7 +85,7 @@ def combine_messages(self, forward_messages, backward_messages, self_loop_messages
         collected_messages_f = tf.sparse_tensor_dense_matmul(mtr_f, forward_messages)
         collected_messages_b = tf.sparse_tensor_dense_matmul(mtr_b, backward_messages)
 
-        if mode=='train':
+        if False: #mode=='train':
             choice = tf.select(tf.random_uniform([50,1]) > 0.5, tf.ones([50,1], dtype=tf.int32), tf.zeros([50,1], dtype=tf.int32))
             options = tf.constant([[2., 0.], [0., 2.]])
             mixer = tf.nn.embedding_lookup(options, choice)
@@ -99,3 +99,10 @@ def combine_messages(self, forward_messages, backward_messages, self_loop_messages
         new_embedding = tf.nn.relu(new_embedding)
 
         return new_embedding
+
+    def local_get_regularization(self):
+        regularization = tf.reduce_mean(tf.square(self.W_forward))
+        regularization += tf.reduce_mean(tf.square(self.W_backward))
+        regularization += tf.reduce_mean(tf.square(self.W_self))
+
+        return 0.0 * regularization
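Two things happen in the basis-GCN encoder: the stochastic forward/backward mixing block is switched off (if False:), and a new regularization hook builds a mean-square penalty over the forward, backward, and self-loop weights but multiplies it by 0.0, i.e. it is wired up yet disabled. A sketch of the same penalty with a live, settings-driven coefficient (using it this way is our assumption; the commit itself hard-codes 0.0):

    import tensorflow as tf

    def l2_penalty(weights, coefficient):
        # Mean-square penalty over a list of weight tensors, as in
        # local_get_regularization, but scaled by a real coefficient
        # (e.g. RegularizationParameter=0.01 from the settings file).
        return coefficient * tf.add_n([tf.reduce_mean(tf.square(w)) for w in weights])

    # Hypothetical usage inside the encoder:
    # reg = l2_penalty([self.W_forward, self.W_backward, self.W_self], 0.01)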
4 changes: 2 additions & 2 deletions run-train.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 
-DATASET="DegreeData"
-SETTINGS="complex.exp"
+DATASET="FB-Toutanova"
+SETTINGS="distmult.exp"
 
 SCRIPT_DIR=$(dirname $(readlink -f $0))
 
10 changes: 6 additions & 4 deletions settings/distmult.exp
@@ -6,21 +6,23 @@
 RegularizationParameter=0.01
 
 [Shared]
-CodeDimension=50
+CodeDimension=100
 
 [Optimizer]
 MaxGradientNorm=1
-ReportTrainLossEvery=100
+ReportTrainLossEvery=5
 BatchSize=5000
 Minibatches=Yes
 
 [EarlyStopping]
-CheckEvery=500
+CheckEvery=10
 
 [Algorithm]
 Name=Adam
 learning_rate=0.01
 
 [General]
-NegativeSampleRate=1
+NegativeSampleRate=10
+GraphSplitSize=15000
 
+
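The DistMult settings double the embedding width, report the training loss and run early-stopping checks far more often, and raise NegativeSampleRate to 10, the value the decoders now read as the loss weight. A sketch of reading this INI-style file with Python's standard configparser (the repository may well use its own parser; this only illustrates the format):

    import configparser

    config = configparser.ConfigParser()
    config.read('settings/distmult.exp')

    dim = int(config['Shared']['CodeDimension'])             # 100
    neg_rate = int(config['General']['NegativeSampleRate'])  # 10
    split_size = int(config['General']['GraphSplitSize'])    # 15000
    # neg_rate doubles as pos_weight in the new weighted loss: one
    # positive triple balances the ten negatives sampled against it.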
2 changes: 1 addition & 1 deletion settings/gcn_basis.exp
@@ -1,7 +1,7 @@
 [Encoder]
 Name=gcn_basis
 DropoutKeepProbability=1.0
-InternalEncoderDimension=50
+InternalEncoderDimension=100
 NumberOfBasisFunctions=2
 NumberOfLayers=1
 UseInputTransform=No
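InternalEncoderDimension rises to 100 to match the decoder's new CodeDimension=100. With NumberOfBasisFunctions=2, each relation's transform is a learned combination of two shared basis matrices, the basis decomposition of the relational GCN (Schlichtkrull et al., 2017). A NumPy sketch of that decomposition; the relation count of 237 is an assumption matching FB15k-237, which the FB-Toutanova dataset name appears to denote:

    import numpy as np

    d = 100                # InternalEncoderDimension after this commit
    num_bases = 2          # NumberOfBasisFunctions
    num_relations = 237    # assumed; FB15k-237 has 237 relation types

    bases = np.random.randn(num_bases, d, d)            # V_b, shared
    coeffs = np.random.randn(num_relations, num_bases)  # a_rb, per relation
    W = np.einsum('rb,bij->rij', coeffs, bases)         # W_r = sum_b a_rb V_b
    # W: (237, 100, 100) -- a full d-by-d transform per relation from only
    # two shared bases plus per-relation coefficients.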
