Update TLSTM.py
baytasin committed Jan 23, 2018
1 parent 62139df commit 72a3e2e
Showing 1 changed file with 71 additions and 31 deletions.
TLSTM.py: 102 changes (71 additions & 31 deletions)
@@ -4,53 +4,92 @@
 import tensorflow as tf


-class T_LSTM(object):
+class TLSTM(object):
     def init_weights(self, input_dim, output_dim, name, std=0.1, reg=None):
         return tf.get_variable(name,shape=[input_dim, output_dim],initializer=tf.random_normal_initializer(0.0, std),regularizer = reg)

     def init_bias(self, output_dim, name):
-        return tf.get_variable(name,shape=[output_dim],initializer=tf.constant_initializer(0.0))
+        return tf.get_variable(name,shape=[output_dim],initializer=tf.constant_initializer(1.0))

-    def __init__(self, input_dim, output_dim, hidden_dim, fc_dim):
+    def no_init_weights(self, input_dim, output_dim, name):
+        return tf.get_variable(name,shape=[input_dim, output_dim])
+
+    def no_init_bias(self, output_dim, name):
+        return tf.get_variable(name,shape=[output_dim])
+
+    def __init__(self, input_dim, output_dim, hidden_dim, fc_dim, train):

         self.input_dim = input_dim
         self.hidden_dim = hidden_dim

+        # [batch size x seq length x input dim]
+        self.input = tf.placeholder('float', shape=[None, None, self.input_dim])
+        self.labels = tf.placeholder('float', shape=[None, output_dim])
+        self.time = tf.placeholder('float', shape=[None, None])
+        self.keep_prob = tf.placeholder(tf.float32)
+
-        self.Wi = self.init_weights(self.input_dim, self.hidden_dim, name='Input_Hidden_weight',reg=None)
-        self.Ui = self.init_weights(self.hidden_dim, self.hidden_dim, name='Input_State_weight',reg=None)
-        self.bi = self.init_bias(self.hidden_dim, name='Input_Hidden_bias')
+        if train == 1:

-        self.Wf = self.init_weights(self.input_dim, self.hidden_dim, name='Forget_Hidden_weight',reg=None)
-        self.Uf = self.init_weights(self.hidden_dim, self.hidden_dim, name='Forget_State_weight',reg=None)
-        self.bf = self.init_bias(self.hidden_dim, name='Forget_Hidden_bias')

-        self.Wog = self.init_weights(self.input_dim, self.hidden_dim, name='Output_Hidden_weight',reg=None)
-        self.Uog = self.init_weights(self.hidden_dim, self.hidden_dim, name='Output_State_weight',reg=None)
-        self.bog = self.init_bias(self.hidden_dim, name='Output_Hidden_bias')
+            self.Wi = self.init_weights(self.input_dim, self.hidden_dim, name='Input_Hidden_weight',reg=None)
+            self.Ui = self.init_weights(self.hidden_dim, self.hidden_dim, name='Input_State_weight',reg=None)
+            self.bi = self.init_bias(self.hidden_dim, name='Input_Hidden_bias')

-        self.Wc = self.init_weights(self.input_dim, self.hidden_dim, name='Cell_Hidden_weight',reg=None)
-        self.Uc = self.init_weights(self.hidden_dim, self.hidden_dim, name='Cell_State_weight',reg=None)
-        self.bc = self.init_bias(self.hidden_dim, name='Cell_Hidden_bias')
+            self.Wf = self.init_weights(self.input_dim, self.hidden_dim, name='Forget_Hidden_weight',reg=None)
+            self.Uf = self.init_weights(self.hidden_dim, self.hidden_dim, name='Forget_State_weight',reg=None)
+            self.bf = self.init_bias(self.hidden_dim, name='Forget_Hidden_bias')

-        self.W_decomp = self.init_weights(self.hidden_dim, self.hidden_dim, name='Decomposition_Hidden_weight',reg=None)
-        self.b_decomp = self.init_bias(self.hidden_dim, name='Decomposition_Hidden_bias_enc')
+            self.Wog = self.init_weights(self.input_dim, self.hidden_dim, name='Output_Hidden_weight',reg=None)
+            self.Uog = self.init_weights(self.hidden_dim, self.hidden_dim, name='Output_State_weight',reg=None)
+            self.bog = self.init_bias(self.hidden_dim, name='Output_Hidden_bias')

-        self.Wo = self.init_weights(self.hidden_dim, fc_dim, name='Fc_Layer_weight',reg=None)#tf.contrib.layers.l2_regularizer(scale=0.001)
-        self.bo = self.init_bias(fc_dim, name='Fc_Layer_bias')

-        self.W_softmax = self.init_weights(fc_dim, output_dim, name='Output_Layer_weight',reg=None)#tf.contrib.layers.l2_regularizer(scale=0.001)
-        self.b_softmax = self.init_bias(output_dim, name='Output_Layer_bias')
+            self.Wc = self.init_weights(self.input_dim, self.hidden_dim, name='Cell_Hidden_weight',reg=None)
+            self.Uc = self.init_weights(self.hidden_dim, self.hidden_dim, name='Cell_State_weight',reg=None)
+            self.bc = self.init_bias(self.hidden_dim, name='Cell_Hidden_bias')

-        # [batch size x seq length x input dim]
-        self.input = tf.placeholder('float', shape=[None, None, self.input_dim])
-        self.labels = tf.placeholder('float', shape=[None, output_dim])
-        self.time = tf.placeholder('float', shape=[None, None])
-        self.keep_prob = tf.placeholder(tf.float32)
+            self.W_decomp = self.init_weights(self.hidden_dim, self.hidden_dim, name='Decomposition_Hidden_weight',reg=None)
+            self.b_decomp = self.init_bias(self.hidden_dim, name='Decomposition_Hidden_bias_enc')

+            self.Wo = self.init_weights(self.hidden_dim, fc_dim, name='Fc_Layer_weight',reg=None)#tf.contrib.layers.l2_regularizer(scale=0.001)
+            self.bo = self.init_bias(fc_dim, name='Fc_Layer_bias')

+            self.W_softmax = self.init_weights(fc_dim, output_dim, name='Output_Layer_weight', reg=None)#tf.contrib.layers.l2_regularizer(scale=0.001)
+            self.b_softmax = self.init_bias(output_dim, name='Output_Layer_bias')

+        else:

-    def T_LSTM_Unit(self, prev_hidden_memory, concat_input):

+            self.Wi = self.no_init_weights(self.input_dim, self.hidden_dim, name='Input_Hidden_weight')
+            self.Ui = self.no_init_weights(self.hidden_dim, self.hidden_dim, name='Input_State_weight')
+            self.bi = self.no_init_bias(self.hidden_dim, name='Input_Hidden_bias')

+            self.Wf = self.no_init_weights(self.input_dim, self.hidden_dim, name='Forget_Hidden_weight')
+            self.Uf = self.no_init_weights(self.hidden_dim, self.hidden_dim, name='Forget_State_weight')
+            self.bf = self.no_init_bias(self.hidden_dim, name='Forget_Hidden_bias')

+            self.Wog = self.no_init_weights(self.input_dim, self.hidden_dim, name='Output_Hidden_weight')
+            self.Uog = self.no_init_weights(self.hidden_dim, self.hidden_dim, name='Output_State_weight')
+            self.bog = self.no_init_bias(self.hidden_dim, name='Output_Hidden_bias')

+            self.Wc = self.no_init_weights(self.input_dim, self.hidden_dim, name='Cell_Hidden_weight')
+            self.Uc = self.no_init_weights(self.hidden_dim, self.hidden_dim, name='Cell_State_weight')
+            self.bc = self.no_init_bias(self.hidden_dim, name='Cell_Hidden_bias')

+            self.W_decomp = self.no_init_weights(self.hidden_dim, self.hidden_dim, name='Decomposition_Hidden_weight')
+            self.b_decomp = self.no_init_bias(self.hidden_dim, name='Decomposition_Hidden_bias_enc')

+            self.Wo = self.no_init_weights(self.hidden_dim, fc_dim, name='Fc_Layer_weight')
+            self.bo = self.no_init_bias(fc_dim, name='Fc_Layer_bias')

+            self.W_softmax = self.no_init_weights(fc_dim, output_dim, name='Output_Layer_weight')
+            self.b_softmax = self.no_init_bias(output_dim, name='Output_Layer_bias')

+    def TLSTM_Unit(self, prev_hidden_memory, concat_input):
         prev_hidden_state, prev_cell = tf.unstack(prev_hidden_memory)

         batch_size = tf.shape(concat_input)[0]
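
The main change in this commit is the new train constructor argument: with train == 1 every variable is created with an explicit initializer for training from scratch, while any other value declares the same variables through no_init_weights/no_init_bias so that previously trained values can be restored from a checkpoint. A minimal usage sketch of the restore path (not part of the commit; the dimensions and checkpoint path are invented for illustration):

    import tensorflow as tf
    from TLSTM import TLSTM

    # Build the graph without initializers, then load trained values.
    lstm = TLSTM(input_dim=5, output_dim=2, hidden_dim=8, fc_dim=16, train=0)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, './tlstm_model.ckpt')  # hypothetical checkpoint path
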
@@ -63,7 +102,7 @@ def T_LSTM_Unit(self, prev_hidden_memory, concat_input):
         T = self.map_elapse_time(t)

         # Decompose the previous cell if there is a elapse time
-        C_ST = tf.nn.sigmoid(tf.matmul(prev_cell, self.W_decomp) + self.b_decomp)
+        C_ST = tf.nn.tanh(tf.matmul(prev_cell, self.W_decomp) + self.b_decomp)
         C_ST_dis = tf.multiply(T, C_ST)
         # if T is 0, then the weight is one
         prev_cell = prev_cell - C_ST + C_ST_dis
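
The sigmoid-to-tanh switch above changes the subspace decomposition at the heart of the T-LSTM unit: the previous cell state is split into a short-term component C_ST, which is discounted by the elapse-time weight, and a long-term remainder, which passes through unchanged. A numpy sketch of that adjustment with assumed toy shapes (illustration only, not the committed code):

    import numpy as np

    hidden_dim = 4
    prev_cell = np.random.randn(1, hidden_dim)           # C_{t-1}, previous cell state
    W_decomp = np.random.randn(hidden_dim, hidden_dim)   # learned decomposition weights
    b_decomp = np.ones(hidden_dim)                       # biases start at 1.0 after this commit
    T = 1.0 / np.log(7.0 + 2.7183)                       # elapse-time weight for a gap of 7

    C_ST = np.tanh(prev_cell @ W_decomp + b_decomp)      # short-term memory (tanh after this commit)
    C_ST_dis = T * C_ST                                  # short-term memory, discounted by T
    adjusted_cell = prev_cell - C_ST + C_ST_dis          # long-term part plus discounted short-term
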
@@ -100,7 +139,7 @@ def get_states(self): # Returns all hidden states for the samples in a batch
         # make scan_time [seq_length x batch_size x 1]
         scan_time = tf.reshape(scan_time, [tf.shape(scan_time)[0],tf.shape(scan_time)[1],1])
         concat_input = tf.concat([scan_time, scan_input],2) # [seq_length x batch_size x input_dim+1]
-        packed_hidden_states = tf.scan(self.T_LSTM_Unit, concat_input, initializer=ini_state_cell, name='states')
+        packed_hidden_states = tf.scan(self.TLSTM_Unit, concat_input, initializer=ini_state_cell, name='states')
         all_states = packed_hidden_states[:, 0, :, :]
         return all_states
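
get_states drives the renamed TLSTM_Unit through tf.scan, which applies the step function along the sequence axis and feeds each step's output back in as the state for the next step. A toy reduction of the same threading with a running sum (illustration only, not from the repository):

    import tensorflow as tf

    xs = tf.constant([[1.0], [2.0], [3.0]])  # [seq_length x 1], like scan's concat_input
    init = tf.constant([0.0])                # plays the role of ini_state_cell
    sums = tf.scan(lambda acc, x: acc + x, xs, initializer=init)

    with tf.Session() as sess:
        print(sess.run(sums))  # [[1.], [3.], [6.]]
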

@@ -131,8 +170,9 @@ def map_elapse_time(self, t):
         c1 = tf.constant(1, dtype=tf.float32)
         c2 = tf.constant(2.7183, dtype=tf.float32)

         # T = tf.multiply(self.wt, t) + self.bt

         T = tf.div(c1, tf.log(t + c2), name='Log_elapse_time')
+        # T = tf.div(c1, tf.add(t , c1), name='Log_elapse_time')

         Ones = tf.ones([1, self.hidden_dim], dtype=tf.float32)
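
map_elapse_time converts a time gap t into the discount weight T = 1 / log(t + e): a gap of zero gives a weight of about 1 (no discount), and the weight decays slowly as the gap grows; the commented-out line added above records an alternative 1 / (t + 1) mapping. A quick standalone check of the logarithmic curve (assumed values, not from the repository):

    import numpy as np

    c2 = 2.7183  # the constant e used by map_elapse_time
    for t in [0.0, 1.0, 7.0, 30.0, 365.0]:
        print('t = %5.1f  ->  T = %.3f' % (t, 1.0 / np.log(t + c2)))
    # t = 0.0 -> T ~ 1.000, t = 7.0 -> T ~ 0.440, t = 365.0 -> T ~ 0.169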
