
Commit 9bb06ac
KamranGhasedi committed Aug 9, 2017
1 parent f77d629
Showing 2 changed files with 23 additions and 23 deletions.
26 changes: 13 additions & 13 deletions DEPICT.py
@@ -89,28 +89,28 @@ def use_least_loaded_gpu(least_loaded=None):
% (str(feature_map_sizes), str(dropouts), str(kernel_sizes), str(strides), str(paddings)))

############################## Build DEPICT Model ##############################
-encoder, decoder, loss_recons, loss_recons_clean = build_MdA(input_var, n_in=dimensions,
-                                                             feature_map_sizes=feature_map_sizes,
-                                                             dropouts=dropouts, kernel_sizes=kernel_sizes,
-                                                             strides=strides,
-                                                             paddings=paddings)
+encoder, decoder, loss_recons, loss_recons_clean = build_depict(input_var, n_in=dimensions,
+                                                                feature_map_sizes=feature_map_sizes,
+                                                                dropouts=dropouts, kernel_sizes=kernel_sizes,
+                                                                strides=strides,
+                                                                paddings=paddings)

############################## Pre-train DEPICT Model ##############################
print("\n...Start AutoEncoder training...")
initial_time = timeit.default_timer()
-train_MdA_val(dataset, X, y, input_var, decoder, encoder, loss_recons, loss_recons_clean, num_clusters, output_path,
-              batch_size=batch_size, test_batch_size=test_batch_size, num_epochs=num_epochs, learning_rate=learning_rate,
-              verbose=verbose, seed=seed, continue_training=args.continue_training)
+train_depict_ae(dataset, X, y, input_var, decoder, encoder, loss_recons, loss_recons_clean, num_clusters, output_path,
+                batch_size=batch_size, test_batch_size=test_batch_size, num_epochs=num_epochs, learning_rate=learning_rate,
+                verbose=verbose, seed=seed, continue_training=args.continue_training)

############################## Clustering Pre-trained DEPICT Features ##############################
-y_pred, centroids = Clustering(dataset, X, y, input_var, encoder, num_clusters, output_path,
+y_pred, centroids = clustering(dataset, X, y, input_var, encoder, num_clusters, output_path,
                                test_batch_size=test_batch_size, seed=seed, continue_training=args.continue_training)

############################## Train DEPICT Model ##############################
-train_RLC(dataset, X, y, input_var, decoder, encoder, loss_recons, num_clusters, y_pred, output_path,
-          batch_size=batch_size, test_batch_size=test_batch_size, num_epochs=num_epochs,
-          learning_rate=learning_rate, rec_mult=reconstruct_hyperparam, clus_mult=cluster_hyperparam,
-          centroids=centroids, continue_training=args.continue_training)
+train_depict(dataset, X, y, input_var, decoder, encoder, loss_recons, num_clusters, y_pred, output_path,
+             batch_size=batch_size, test_batch_size=test_batch_size, num_epochs=num_epochs,
+             learning_rate=learning_rate, rec_mult=reconstruct_hyperparam, clus_mult=cluster_hyperparam,
+             centroids=centroids, continue_training=args.continue_training)

final_time = timeit.default_timer()

20 changes: 10 additions & 10 deletions functions.py
@@ -488,9 +488,9 @@ def build_eml(input_var=None, n_out=None, W_initial=None):
return l_out


-def build_MdA(input_var=None, n_in=[None, None, None], feature_map_sizes=[50, 50],
-              dropouts=[0.1, 0.1, 0.1], kernel_sizes=[5, 5], strides=[2, 2],
-              paddings=[2, 2], hlayer_loss_param=0.1):
+def build_depict(input_var=None, n_in=[None, None, None], feature_map_sizes=[50, 50],
+                 dropouts=[0.1, 0.1, 0.1], kernel_sizes=[5, 5], strides=[2, 2],
+                 paddings=[2, 2], hlayer_loss_param=0.1):
# ENCODER
l_e0 = lasagne.layers.DropoutLayer(
lasagne.layers.InputLayer(shape=(None, n_in[0], n_in[1], n_in[2]), input_var=input_var), p=dropouts[0])
@@ -554,9 +554,9 @@ def build_MdA(input_var=None, n_in=[None, None, None], feature_map_sizes=[50, 50
return l_e3, l_d0, loss_recons, loss_recons_clean
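
For context, a minimal usage sketch of the renamed builder; the n_in shape and the import line are assumptions for illustration, not part of this commit:

# Hypothetical sketch: compile the clean reconstruction loss returned by
# build_depict into a callable, relying on its default architecture arguments.
import theano
import theano.tensor as T
from functions import build_depict  # defined in functions.py, as renamed here

input_var = T.tensor4('inputs')  # (batch, channels, rows, cols)
encoder, decoder, loss_recons, loss_recons_clean = build_depict(
    input_var, n_in=[1, 28, 28])  # assumed MNIST-like input shape
recons_fn = theano.function([input_var], loss_recons_clean)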


-def train_MdA_val(dataset, X, y, input_var, decoder, encoder, loss_recons, loss_recons_clean, num_clusters, output_path,
-                  batch_size=100, test_batch_size=100, num_epochs=1000, learning_rate=1e-4, verbose=1, seed=42,
-                  continue_training=False):
+def train_depict_ae(dataset, X, y, input_var, decoder, encoder, loss_recons, loss_recons_clean, num_clusters, output_path,
+                    batch_size=100, test_batch_size=100, num_epochs=1000, learning_rate=1e-4, verbose=1, seed=42,
+                    continue_training=False):
learning_rate_shared = theano.shared(lasagne.utils.floatX(learning_rate))
params = lasagne.layers.get_all_params(decoder, trainable=True)
updates = lasagne.updates.adam(loss_recons, params, learning_rate=learning_rate_shared)
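
Because the learning rate lives in a Theano shared variable, the training loop can anneal it without recompiling the update graph; a hedged sketch of that pattern (the decay factor is an assumption, not taken from this code):

# Sketch: lower the learning rate mid-training via the shared variable.
new_lr = lasagne.utils.floatX(learning_rate * 0.1)  # assumed decay factor
learning_rate_shared.set_value(new_lr)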
@@ -630,7 +630,7 @@ def train_MdA_val(dataset, X, y, input_var, decoder, encoder, loss_recons, loss_
lasagne.layers.set_all_param_values(decoder, best_params_values)


-def Clustering(dataset, X, y, input_var, encoder, num_clusters, output_path, test_batch_size=100, seed=42,
+def clustering(dataset, X, y, input_var, encoder, num_clusters, output_path, test_batch_size=100, seed=42,
               continue_training=False):
encoder_clean = lasagne.layers.get_output(encoder, deterministic=True)
encoder_clean_function = theano.function([input_var], encoder_clean)
@@ -692,9 +692,9 @@ def Clustering(dataset, X, y, input_var, encoder, num_clusters, output_path, tes
return np.int32(y_pred), np.float32(centroids)
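
Since clustering() returns integer assignments alongside the centroids, its output can be scored directly against ground-truth labels; a self-contained sketch with stand-in arrays (in DEPICT, y and y_pred would come from the dataset and from clustering()):

# Hypothetical evaluation sketch, not part of this commit.
import numpy as np
from sklearn.metrics import normalized_mutual_info_score

y = np.array([0, 0, 1, 1, 2, 2])        # stand-in ground-truth labels
y_pred = np.int32([1, 1, 0, 0, 2, 2])   # stand-in cluster assignments
print('NMI: %.4f' % normalized_mutual_info_score(y, y_pred))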


-def train_RLC(dataset, X, y, input_var, decoder, encoder, loss_recons, num_clusters, y_pred, output_path,
-              batch_size=100, test_batch_size=100, num_epochs=1000, learning_rate=1e-4, prediction_status='soft',
-              rec_mult=1, clus_mult=1, centroids=None, init_flag=1, continue_training=False):
+def train_depict(dataset, X, y, input_var, decoder, encoder, loss_recons, num_clusters, y_pred, output_path,
+                 batch_size=100, test_batch_size=100, num_epochs=1000, learning_rate=1e-4, prediction_status='soft',
+                 rec_mult=1, clus_mult=1, centroids=None, init_flag=1, continue_training=False):
######################
# ADD RLC TO MdA #
######################
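The rec_mult and clus_mult keyword arguments suggest a weighted combination of the reconstruction and clustering objectives; the actual expression lives in the elided body of train_depict, so the following is only a generic sketch of that pattern:

# Generic sketch (assumption, not the author's code): how rec_mult and
# clus_mult typically weight the two terms of a joint objective.
def combine_losses(loss_recons, loss_clus, rec_mult=1.0, clus_mult=1.0):
    return rec_mult * loss_recons + clus_mult * loss_clus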
