Commit 8d31f5a

Lazy Programmer committed

take away dropout

1 parent 58b1d46 · commit 8d31f5a

File tree: 2 files changed, +16 −3 lines


Diff for: nlp_class3/attention.py (+5 −1)

@@ -202,7 +202,11 @@ def softmax_over_time(x):
 # Set up the encoder - simple!
 encoder_inputs_placeholder = Input(shape=(max_len_input,))
 x = embedding_layer(encoder_inputs_placeholder)
-encoder = Bidirectional(LSTM(LATENT_DIM, return_sequences=True, dropout=0.5))
+encoder = Bidirectional(LSTM(
+  LATENT_DIM,
+  return_sequences=True,
+  # dropout=0.5 # dropout not available on gpu
+))
 encoder_outputs = encoder(x)
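Aside (not part of the commit): the dropout argument removed here is the in-cell dropout, which the fused GPU (cuDNN) LSTM kernels of this Keras generation do not support. A minimal sketch of one common workaround is to apply a standalone Dropout layer to the LSTM's inputs instead; the constants below are assumed placeholders standing in for the script's own values.

from keras.layers import Input, Embedding, Dropout, LSTM, Bidirectional

LATENT_DIM = 256      # assumed placeholder
max_len_input = 20    # assumed placeholder
VOCAB_SIZE = 10000    # assumed placeholder
EMBEDDING_DIM = 100   # assumed placeholder

encoder_inputs_placeholder = Input(shape=(max_len_input,))
x = Embedding(VOCAB_SIZE, EMBEDDING_DIM)(encoder_inputs_placeholder)
x = Dropout(0.5)(x)   # dropout on the LSTM's inputs, outside the cell
encoder = Bidirectional(LSTM(LATENT_DIM, return_sequences=True))
encoder_outputs = encoder(x)

Note this is not identical to the in-cell argument (Keras reuses a single dropout mask across timesteps there, while a Dropout layer samples a fresh mask per step), but it keeps some regularization while staying on the fast GPU path.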

Diff for: nlp_class3/wseq2seq.py (+11 −2)

@@ -174,7 +174,11 @@
 ##### build the model #####
 encoder_inputs_placeholder = Input(shape=(max_len_input,))
 x = embedding_layer(encoder_inputs_placeholder)
-encoder = LSTM(LATENT_DIM, return_state=True, dropout=0.5)
+encoder = LSTM(
+  LATENT_DIM,
+  return_state=True,
+  # dropout=0.5 # dropout not available on gpu
+)
 encoder_outputs, h, c = encoder(x)
 # encoder_outputs, h = encoder(x) #gru

@@ -192,7 +196,12 @@

 # since the decoder is a "to-many" model we want to have
 # return_sequences=True
-decoder_lstm = LSTM(LATENT_DIM, return_sequences=True, return_state=True, dropout=0.5)
+decoder_lstm = LSTM(
+  LATENT_DIM,
+  return_sequences=True,
+  return_state=True,
+  # dropout=0.5 # dropout not available on gpu
+)
 decoder_outputs, _, _ = decoder_lstm(
   decoder_inputs_x,
   initial_state=encoder_states
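The same trade-off is being made for both LSTMs in this seq2seq model. For completeness, a sketch (again not from the commit, with assumed placeholder constants) of moving dropout outside both cells while keeping the encoder-to-decoder state handoff intact:

from keras.layers import Input, Embedding, Dropout, LSTM

LATENT_DIM = 256      # assumed placeholder
max_len_input = 20    # assumed placeholder
max_len_target = 20   # assumed placeholder
VOCAB_SIZE = 10000    # assumed placeholder
EMBEDDING_DIM = 100   # assumed placeholder

# encoder: dropout moved onto the inputs, outside the cell
encoder_inputs_placeholder = Input(shape=(max_len_input,))
x = Embedding(VOCAB_SIZE, EMBEDDING_DIM)(encoder_inputs_placeholder)
x = Dropout(0.5)(x)
encoder = LSTM(LATENT_DIM, return_state=True)
encoder_outputs, h, c = encoder(x)
encoder_states = [h, c]   # seeds the decoder, as in the script

# decoder: same treatment, still returning full sequences and states
decoder_inputs_placeholder = Input(shape=(max_len_target,))
decoder_inputs_x = Embedding(VOCAB_SIZE, EMBEDDING_DIM)(decoder_inputs_placeholder)
decoder_inputs_x = Dropout(0.5)(decoder_inputs_x)
decoder_lstm = LSTM(LATENT_DIM, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(
    decoder_inputs_x,
    initial_state=encoder_states
)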
