You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I have a dataset with 41 features and one output column which has 23 classes.
I modified the IMDB sentiment classification LSTM example for my dataset and it is showing an error. Please have a look at the attached image.
I am trying to implement the following LSTM configurations:
two memory blocks with two cells each
four memory blocks with two cells each
two memory blocks with four cells each
eight memory blocks with four cells each
from future import print_function
import numpy as np
np.random.seed(1337)
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from sklearn.cross_validation import train_test_split
import pandas as pd
max_features = 494021
maxlen = 100 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
print('Loading data...')
data = pd.read_csv('kddtrain.csv', header=None)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen, dropout=0.5))
model.add(LSTM(128, dropout_W=0.5, dropout_U=0.1)) # try using a GRU instead, for fun
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')
This issue has been automatically marked as stale because it has not had recent activity. It will be closed after 30 days if no further activity occurs, but feel free to re-open a closed issue if needed.
I have a dataset with 41 features and one output column which has 23 classes.
I modified the IMDB sentiment classification LSTM example for my dataset and it is showing an error. Please have a look at the attached image.
I am trying to implement
from future import print_function
import numpy as np
np.random.seed(1337)
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from sklearn.cross_validation import train_test_split
import pandas as pd
max_features = 494021
maxlen = 100 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
print('Loading data...')
data = pd.read_csv('kddtrain.csv', header=None)
X = data.iloc[:,1:42]
y = data.iloc[:,0]
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2,random_state=42)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print(X_train.shape)
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen, dropout=0.5))
model.add(LSTM(128, dropout_W=0.5, dropout_U=0.1)) # try using a GRU instead, for fun
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
validation_data=(X_test, y_test), show_accuracy=True)
score, acc = model.evaluate(X_test, y_test,
batch_size=batch_size,
show_accuracy=True)
print('Test score:', score)
print('Test accuracy:', acc)
The text was updated successfully, but these errors were encountered: