
Commit

Fix up a few examples
fchollet committed Jul 17, 2016
1 parent f3e7245 commit 01d5e7b
Showing 3 changed files with 14 additions and 10 deletions.
4 changes: 2 additions & 2 deletions examples/imdb_cnn_lstm.py
@@ -22,9 +22,9 @@
embedding_size = 128

# Convolution
-filter_length = 3
+filter_length = 5
nb_filter = 64
-pool_length = 2
+pool_length = 4

# LSTM
lstm_output_size = 70
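
For context, these constants feed the convolution and pooling layers further down in imdb_cnn_lstm.py: a wider filter (filter_length = 5) and a coarser pool (pool_length = 4). The sketch below shows how they are typically wired together in the Keras 1.x API of that era; the rest of the file is not part of this diff, so the exact layer stack and the max_features/maxlen values are assumptions, not the file's verbatim contents.

from keras.models import Sequential
from keras.layers import Embedding, Dropout, Dense, Activation, LSTM
from keras.layers import Convolution1D, MaxPooling1D

# values from the hunk above
embedding_size = 128
filter_length = 5
nb_filter = 64
pool_length = 4
lstm_output_size = 70

# assumed, not shown in this diff
max_features = 20000
maxlen = 100

model = Sequential()
model.add(Embedding(max_features, embedding_size, input_length=maxlen))
model.add(Dropout(0.25))
# each of the 64 filters now spans 5 tokens instead of 3
model.add(Convolution1D(nb_filter=nb_filter,
                        filter_length=filter_length,
                        border_mode='valid',
                        activation='relu',
                        subsample_length=1))
# pooling over 4 steps instead of 2 shortens the sequence fed to the LSTM
model.add(MaxPooling1D(pool_length=pool_length))
model.add(LSTM(lstm_output_size))
model.add(Dense(1))
model.add(Activation('sigmoid'))
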
19 changes: 11 additions & 8 deletions examples/lstm_text_generation.py
@@ -14,6 +14,7 @@
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
+from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
@@ -50,20 +51,22 @@
# build the model: 2 stacked LSTM
print('Build model...')
model = Sequential()
-model.add(LSTM(512, return_sequences=True, input_shape=(maxlen, len(chars))))
-model.add(LSTM(512, return_sequences=False))
-model.add(Dropout(0.2))
+model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))

-model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
+optimizer = RMSprop(lr=0.01)
+model.compile(loss='categorical_crossentropy', optimizer=optimizer)


-def sample(a, temperature=1.0):
+def sample(preds, temperature=1.0):
    # helper function to sample an index from a probability array
-    a = np.log(a) / temperature
-    a = np.exp(a) / np.sum(np.exp(a))
-    return np.argmax(np.random.multinomial(1, a, 1))
+    preds = np.asarray(preds).astype('float64')
+    preds = np.log(preds) / temperature
+    exp_preds = np.exp(preds)
+    preds = exp_preds / np.sum(exp_preds)
+    probas = np.random.multinomial(1, preds, 1)
+    return np.argmax(probas)


# train the model, output generated text after each iteration
for iteration in range(1, 60):
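
The reworked sample() is worth reading on its own: it casts the predictions to float64, scales the log-probabilities by the temperature, re-normalizes, and draws a single multinomial sample; the cast and explicit re-normalization presumably guard against np.random.multinomial rejecting a float32 vector whose probabilities sum to slightly more than 1. A minimal standalone check, using a made-up probability vector rather than real model output:

import numpy as np

def sample(preds, temperature=1.0):
    # helper function to sample an index from a probability array
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)

toy_preds = [0.5, 0.3, 0.15, 0.05]         # made-up distribution, not from the example
print(sample(toy_preds, temperature=0.2))  # low temperature: almost always index 0
print(sample(toy_preds, temperature=1.2))  # high temperature: noticeably more varied picks
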
1 change: 1 addition & 0 deletions keras/preprocessing/text.py
@@ -99,6 +99,7 @@ def fit_on_texts(self, texts):
        wcounts = list(self.word_counts.items())
        wcounts.sort(key=lambda x: x[1], reverse=True)
        sorted_voc = [wc[0] for wc in wcounts]
+        # note that index 0 is reserved, never assigned to an existing word
        self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))

        self.index_docs = {}
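
The new comment in fit_on_texts is easy to verify: Tokenizer assigns indices starting at 1, ordered by descending word frequency, leaving 0 unassigned (0 is conventionally kept for padding/masking). A quick sketch; the ordering of equal-frequency words may differ from the example output below:

from keras.preprocessing.text import Tokenizer

tokenizer = Tokenizer()
tokenizer.fit_on_texts(['the cat sat on the mat'])
print(tokenizer.word_index)
# e.g. {'the': 1, 'cat': 2, 'sat': 3, 'on': 4, 'mat': 5}
# index 0 is reserved and never assigned to an existing word
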

1 comment on commit 01d5e7b

@hdmetor

I referenced this commit here. I was wondering whether dropping the second LSTM layer is intentional, or just a copy-paste from another example (imdb_lstm maybe?)
