fix weight decay bugs and apply cross validation
calebge(葛政) committed Aug 28, 2018
1 parent 6d61d18 commit 367bbdf
Showing 4 changed files with 25 additions and 15 deletions.
14 changes: 7 additions & 7 deletions facenet.py
@@ -500,9 +500,9 @@ def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_fold
fprs = np.zeros((nrof_folds,nrof_thresholds))
accuracy = np.zeros((nrof_folds))

-#diff = np.subtract(embeddings1, embeddings2)
-#dist = np.sum(np.square(diff),1)
-dist = (embeddings1 * embeddings2).sum(axis=1)/2.
+diff = np.subtract(embeddings1, embeddings2)
+dist = np.sum(np.square(diff),1)
+#dist = (embeddings1 * embeddings2).sum(axis=1)/2.
indices = np.arange(nrof_pairs)

best_thres = []
@@ -546,9 +546,9 @@ def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_targe
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)

-#diff = np.subtract(embeddings1, embeddings2)
-#dist = np.sum(np.square(diff),1)
-dist = (embeddings1 * embeddings2).sum(axis=1)/2.
+diff = np.subtract(embeddings1, embeddings2)
+dist = np.sum(np.square(diff),1)
+#dist = (embeddings1 * embeddings2).sum(axis=1)/2.
indices = np.arange(nrof_pairs)

for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
@@ -572,7 +572,7 @@ def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_targe


def calculate_val_far(threshold, dist, actual_issame):
-predict_issame = np.greater(dist, threshold)
+predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
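Note on the facenet.py changes: the commit swaps the halved dot-product similarity for squared Euclidean distance, so calculate_val_far flips its comparison from np.greater (a pair matches when similarity is above the threshold) to np.less (a pair matches when distance is below it). For L2-normalized embeddings the two scores carry the same information, since ||a - b||^2 = 2 - 2*(a.b). A minimal numpy sketch (not part of the commit) verifying that identity:

import numpy as np

# Illustration only: random unit-normalized embeddings stand in for model output.
rng = np.random.default_rng(0)
a = rng.normal(size=(5, 128))
b = rng.normal(size=(5, 128))
a /= np.linalg.norm(a, axis=1, keepdims=True)
b /= np.linalg.norm(b, axis=1, keepdims=True)

sim = (a * b).sum(axis=1)                  # old score: dot product (before the /2.)
dist = np.sum(np.square(a - b), axis=1)    # new score: squared Euclidean distance
assert np.allclose(dist, 2.0 - 2.0 * sim)  # ||a - b||^2 == 2 - 2*a.b on the unit sphere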
4 changes: 2 additions & 2 deletions lfw.py
@@ -33,12 +33,12 @@

def evaluate(embeddings, actual_issame, nrof_folds=10):
# Calculate evaluation metrics
-thresholds = np.arange(-1, 1, 0.01)
+thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy = facenet.calculate_roc(thresholds, embeddings1, embeddings2,
np.asarray(actual_issame), nrof_folds=nrof_folds)
-thresholds = np.arange(-1, 1, 0.001)
+thresholds = np.arange(0, 4, 0.001)
val, val_std, far = facenet.calculate_val(thresholds, embeddings1, embeddings2,
np.asarray(actual_issame), 1e-3, nrof_folds=nrof_folds)
return tpr, fpr, accuracy, val, val_std, far
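Note on the lfw.py changes: squared Euclidean distance between unit-length embeddings lies in [0, 4] (0 for identical vectors, 4 for diametrically opposite ones), so the threshold sweep moves from np.arange(-1, 1, ...) to np.arange(0, 4, ...). Per the commit message, calculate_roc and calculate_val then select the threshold by k-fold cross validation. A simplified sketch of that selection loop, assuming the sklearn KFold splitter that facenet.py relies on (not the repo's exact code):

import numpy as np
from sklearn.model_selection import KFold

def cv_accuracy(dist, actual_issame, thresholds, nrof_folds=10):
    # Pick the accuracy-maximizing threshold on each training fold,
    # then score that threshold on the held-out fold.
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)
    fold_acc = []
    for train_set, test_set in k_fold.split(np.arange(len(dist))):
        train_acc = [np.mean((dist[train_set] < t) == actual_issame[train_set])
                     for t in thresholds]
        best_t = thresholds[np.argmax(train_acc)]
        fold_acc.append(np.mean((dist[test_set] < best_t) == actual_issame[test_set]))
    return np.mean(fold_acc), np.std(fold_acc)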
1 change: 1 addition & 0 deletions models/resface.py
@@ -10,6 +10,7 @@
def prelu(x):
with tf.variable_scope('PRelu'):
alphas = tf.get_variable(name='prelu_alphas', dtype=tf.float32, initializer=tf.constant_initializer(value=0.25),shape=[x.get_shape()[-1]])
+#alphas = tf.Variable(tf.constant(0.25,dtype=tf.float32,shape=[x.get_shape()[-1]]),name='prelu_alphas')
pos = tf.nn.relu(x)
neg = alphas * (x - abs(x)) * 0.5
return pos + neg
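Note on the resface.py change: the PReLU slope variable is created with tf.get_variable under a variable scope, which supports scope-based reuse; the added comment preserves an equivalent plain tf.Variable form. The pos + neg expression is the standard PReLU decomposition. A quick numpy check (illustrative, not from the repo) that relu(x) + alphas * (x - abs(x)) * 0.5 equals x for positive inputs and alphas * x for negative ones:

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
alphas = 0.25                         # the initializer value used above
pos = np.maximum(x, 0.0)              # tf.nn.relu(x)
neg = alphas * (x - np.abs(x)) * 0.5  # zero for x > 0, alphas * x for x < 0
assert np.allclose(pos + neg, np.where(x > 0, x, alphas * x))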
21 changes: 15 additions & 6 deletions train.py
@@ -153,14 +153,20 @@ def main(args):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_batch, logits=AM_logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')


+#print('test',tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

+for weights in slim.get_variables_by_name('kernel'):
+    kernel_regularization = tf.contrib.layers.l2_regularizer(args.weight_decay)(weights)
+    print(weights)
+    tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, kernel_regularization)

regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
-total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

+if args.weight_decay==0:
+    total_loss = tf.add_n([cross_entropy_mean], name='total_loss')
+else:
+    total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
tf.add_to_collection('losses', total_loss)

#define two saver in case under 'finetuning on different dataset' situation
@@ -169,7 +175,8 @@ def main(args):

#train_op = facenet.train(total_loss, global_step, args.optimizer,
# learning_rate, args.moving_average_decay, tf.trainable_variables(), args.log_histograms)
-train_op = tf.train.AdamOptimizer(learning_rate).minimize(total_loss,global_step = global_step,var_list=tf.trainable_variables())
+#train_op = tf.train.AdamOptimizer(learning_rate).minimize(total_loss,global_step = global_step,var_list=tf.trainable_variables())
+train_op = tf.train.MomentumOptimizer(learning_rate,momentum=0.9).minimize(total_loss,global_step=global_step,var_list=tf.trainable_variables())
summary_op = tf.summary.merge_all()

# Start running operations on the Graph.
@@ -199,9 +206,11 @@ def main(args):

print('validation running...')
if args.lfw_dir:
-best_accuracy = evaluate_double(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, embeddings,
-    label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer,best_accuracy,saver_save,model_dir,subdir,image_batch,args)

+#best_accuracy = evaluate_double(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, embeddings,
+# label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer,best_accuracy, saver_save,model_dir,subdir,image_batch,args)

+best_accuracy = evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, embeddings,
+    label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer,best_accuracy,saver_save,model_dir,subdir)
return model_dir

def find_threshold(var, percentile):
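Note on the train.py changes: this is the weight-decay fix named in the commit message. Variables named 'kernel' (as created by tf.layers-style layers) were presumably not contributing to REGULARIZATION_LOSSES, so the added loop pushes an explicit L2 penalty for each of them into that collection before total_loss is assembled, and the regularization term is skipped entirely when weight_decay is 0; the optimizer also moves from Adam to Momentum(0.9). A hedged, self-contained TF1-style sketch of that wiring, using the diff's names but with the zero-decay early exit hoisted before the loop (l2_regularizer with scale 0 is a no-op):

import tensorflow as tf              # TensorFlow 1.x API, as the repo uses
slim = tf.contrib.slim

def build_total_loss(cross_entropy_mean, weight_decay):
    if weight_decay == 0:
        # Mirrors the diff's args.weight_decay==0 branch: no penalty at all.
        return tf.add_n([cross_entropy_mean], name='total_loss')
    # L2-penalize every 'kernel' variable via the standard collection.
    for weights in slim.get_variables_by_name('kernel'):
        penalty = tf.contrib.layers.l2_regularizer(weight_decay)(weights)
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, penalty)
    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    return tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

# The commit then minimizes this with SGD plus momentum instead of Adam:
# train_op = tf.train.MomentumOptimizer(lr, momentum=0.9).minimize(total_loss, ...)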
