Skip to content

Commit

Permalink
update to transition before integrating action prediction into embodiment mapping
Browse files Browse the repository at this point in the history
  • Loading branch information
mjedmonds committed Jul 11, 2017
1 parent 7311dd8 commit d20edd9
Show file tree
Hide file tree
Showing 8 changed files with 24 additions and 13 deletions.
Binary file modified transition_srv/scripts/models/map/model.ckpt.data-00000-of-00001
Binary file not shown.
Binary file modified transition_srv/scripts/models/map/model.ckpt.index
Binary file not shown.
Binary file modified transition_srv/scripts/models/map/model.ckpt.meta
Binary file not shown.
Binary file not shown.
Binary file modified transition_srv/scripts/models/transition/model.ckpt.index
Binary file not shown.
Binary file modified transition_srv/scripts/models/transition/model.ckpt.meta
Binary file not shown.
2 changes: 1 addition & 1 deletion transition_srv/scripts/transition_model.py
Expand Up @@ -122,7 +122,7 @@ def train_mapping():

# Parameters
learning_rate = 0.001
training_epochs = 5000
training_epochs = 7000
batch_size = 100
display_step = 50
total_batch = 20
Expand Down
35 changes: 23 additions & 12 deletions transition_srv/scripts/transition_model_common.py
Expand Up @@ -277,7 +277,7 @@ def get_scope_variable(scope_name, var, shape, initializer):

def create_mapping_model(x, keep_prob, n_dim1, n_dim2, train=False):
with tf.variable_scope('mapping'):
layer_sizes = [10, 10, 10, n_dim2]
layer_sizes = [8, 8, 8, 8, n_dim2]

# Store layers weight & bias
weights = [ get_scope_variable('map', 'weight_0', [n_dim1, layer_sizes[0]], initializer=tf.random_normal_initializer()) ]
Expand All @@ -293,23 +293,34 @@ def create_mapping_model(x, keep_prob, n_dim1, n_dim2, train=False):

last_layer = layer_0
layer_idx = 1
dropout_layer = 1
dropout_layers = [1, 2, 3]
dropout_idx = 0
add_dropout = True
while layer_idx < dropout_layer:
layer_i = tf.add(tf.matmul(last_layer, weights[layer_idx]), biases[layer_idx])
layer_i = tf.nn.relu(layer_i)

# layer_1 = tf.nn.batch_normalization(layer_1, weights['n1_mean'], weights['n1_var'], 0, 0, 1e-3)
last_layer = layer_i
layer_idx += 1
while layer_idx < len(layer_sizes)-1:
while dropout_idx < len(dropout_layers) and layer_idx < dropout_layers[dropout_idx]:
layer_i = tf.add(tf.matmul(last_layer, weights[layer_idx]), biases[layer_idx])
layer_i = tf.nn.relu(layer_i)

if add_dropout:
layer_i = tf.nn.dropout(last_layer, keep_prob)
last_layer = layer_i
# layer_1 = tf.nn.batch_normalization(layer_1, weights['n1_mean'], weights['n1_var'], 0, 0, 1e-3)
last_layer = layer_i
layer_idx += 1

# finished adding dropout layers
if dropout_idx >= len(dropout_layers):
break

if add_dropout:
layer_i = tf.nn.dropout(last_layer, keep_prob)
last_layer = layer_i
dropout_idx += 1

# create any remaining layers (e.g. if there are no dropout layers)
while layer_idx < len(layer_sizes)-1:
layer_i = tf.add(tf.matmul(last_layer, weights[layer_idx]), biases[layer_idx])
layer_i = tf.nn.relu(layer_i)

# layer_1 = tf.nn.batch_normalization(layer_1, weights['n1_mean'], weights['n1_var'], 0, 0, 1e-3)
last_layer = layer_i
layer_idx += 1

Expand All @@ -320,7 +331,7 @@ def create_mapping_model(x, keep_prob, n_dim1, n_dim2, train=False):

def create_model(n_input, n_classes, train=False):

enc_size = 6
enc_size = 8

def create_autoencoder(x):
# layer_sizes = [64, 16, 32, 128, n_input]
Expand Down

0 comments on commit d20edd9

Please sign in to comment.