Skip to content

Commit

Permalink
Add files via upload
Browse files Browse the repository at this point in the history
  • Loading branch information
vinayakumarr committed Feb 1, 2018
1 parent 68e8724 commit 09cc18e
Show file tree
Hide file tree
Showing 24 changed files with 1,397 additions and 0 deletions.
5 changes: 5 additions & 0 deletions heartsound/1.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
import pandas as pd
import csv
# Load every row of 1.csv into a list of lists (one inner list per CSV row).
# The csv module on Python 3 requires a text-mode file opened with newline=''
# (the original 'rb' binary mode only worked on Python 2).
with open('1.csv', 'r', newline='') as f:
    reader = csv.reader(f)
    your_list = list(reader)
13 changes: 13 additions & 0 deletions heartsound/dataseta/artifact.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
import os
from scipy.io import wavfile
import numpy as np

# Export every artifact-class training recording as one CSV row of integer
# waveform samples in trainartifact.csv.
# Py3 fix: the built-in file() constructor was removed — use open() instead,
# and don't shadow the name 'file' with the loop variable.
with open('trainartifact.csv', 'w') as outfile:
    for fname in os.listdir('atrain/Atraining_artifact/Atraining_artifact'):
        if fname.endswith(".wav"):
            rate, wf = wavfile.read('atrain/Atraining_artifact/Atraining_artifact/' + fname)
            print(rate)  # sample-rate sanity check while iterating
            np.savetxt(outfile, [wf], fmt='%01d', delimiter=',')
            #print(wf.shape)
13 changes: 13 additions & 0 deletions heartsound/dataseta/extrastole.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
import os
from scipy.io import wavfile
import numpy as np

# Export every extra-heart-sound training recording as one CSV row of integer
# waveform samples in extrastole.csv.
# NOTE(review): the directory mixes 'Atraining_extrahs' and 'Atraining_extrahls'
# spellings — preserved as-is; confirm the actual path on disk before running.
# Py3 fix: built-in file() removed — use open(); avoid shadowing 'file'.
with open('extrastole.csv', 'w') as outfile:
    for fname in os.listdir('atrain/Atraining_extrahs/Atraining_extrahls'):
        if fname.endswith(".wav"):
            rate, wf = wavfile.read('atrain/Atraining_extrahs/Atraining_extrahls/' + fname)
            print(rate)  # sample-rate sanity check while iterating
            np.savetxt(outfile, [wf], fmt='%01d', delimiter=',')

77 changes: 77 additions & 0 deletions heartsound/dataseta/ml/across.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility

from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Lambda
from keras.layers import Embedding
from keras.layers import Convolution1D,MaxPooling1D, Flatten
from keras.datasets import imdb
from keras import backend as K
from sklearn.cross_validation import train_test_split
import pandas as pd
from keras.utils.np_utils import to_categorical

from sklearn.preprocessing import Normalizer
from keras.models import Sequential
from keras.layers import Convolution1D, Dense, Dropout, Flatten, MaxPooling1D
from keras.utils import np_utils
import numpy as np
import h5py
from keras import callbacks
from keras.layers import LSTM, GRU, SimpleRNN
from keras.callbacks import CSVLogger
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger
import csv
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import cross_val_score
from keras.wrappers.scikit_learn import KerasClassifier

# Load the training waveforms and labels for cross-validated classification.
# Each row of dataa.csv is one recording's raw waveform samples.
# Py3 fix: csv.reader needs a text-mode file opened with newline=''
# (the original 'rb' binary mode was a Python-2 idiom).
with open('dataa.csv', 'r', newline='') as f:
    reader = csv.reader(f)
    your_list = list(reader)

trainX = np.array(your_list)

# Single unnamed column of integer class ids (0-3, judging by Dense(4) below).
traindata = pd.read_csv('dataalabels.csv', header=None)
Y = traindata.iloc[:, 0]
y_train1 = np.array(Y)          # integer labels, kept for StratifiedKFold
y_train = to_categorical(y_train1)  # one-hot, for categorical_crossentropy

# Pad/truncate every recording to 44100 samples (one second at 44.1 kHz).
maxlen = 44100
trainX = sequence.pad_sequences(trainX, maxlen=maxlen)

# reshape input to be [samples, time steps, features]
X_train = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))


def create_model():
    """Build and compile a stacked 4-layer GRU classifier.

    The network consumes one 44100-feature time step per sample and ends in
    a 4-way softmax; it is compiled with categorical cross-entropy and Adam.
    """
    print("----------------------------------")
    net = Sequential()
    # First GRU declares the input width; the middle two keep the time axis.
    net.add(GRU(64, input_dim=44100, return_sequences=True))
    net.add(Dropout(0.1))
    for _ in range(2):
        net.add(GRU(64, return_sequences=True))
        net.add(Dropout(0.1))
    # Final GRU collapses the sequence before the softmax head.
    net.add(GRU(64, return_sequences=False))
    net.add(Dropout(0.1))
    net.add(Dense(4))
    net.add(Activation('softmax'))
    print("epoch")
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net

# Fix the RNG so the stratified folds are reproducible across runs.
seed = 7
np.random.seed(seed)

# Wrap the Keras builder so scikit-learn can drive training/prediction.
clf = KerasClassifier(build_fn=create_model, epochs=50, batch_size=2)

# evaluate using 10-fold cross validation
skf = StratifiedKFold(y=y_train1, n_folds=10, shuffle=True, random_state=seed)
scores = cross_val_score(clf, X_train, y_train, cv=skf)
print(scores)
print("results mean")
print(scores.mean())

64 changes: 64 additions & 0 deletions heartsound/dataseta/ml/trainandtest/cnn.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility

from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Lambda
from keras.layers import Embedding
from keras.layers import Convolution1D,MaxPooling1D, Flatten
from keras.datasets import imdb
from keras import backend as K
from sklearn.cross_validation import train_test_split
import pandas as pd
from keras.utils.np_utils import to_categorical

from sklearn.preprocessing import Normalizer
from keras.models import Sequential
from keras.layers import Convolution1D, GlobalMaxPooling1D,Dense, Dropout, Flatten, MaxPooling1D
from keras.utils import np_utils
import numpy as np
import h5py
from keras import callbacks
from keras.layers import LSTM, GRU, SimpleRNN
from keras.callbacks import CSVLogger
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger
import csv
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import cross_val_score
from keras.wrappers.scikit_learn import KerasClassifier

# Train a 1-D CNN on the heart-sound training set and checkpoint by loss.
# Py3 fix: csv.reader needs a text-mode file opened with newline=''
# (the original 'rb' binary mode was a Python-2 idiom).
with open('a/traindata.csv', 'r', newline='') as f:
    reader = csv.reader(f)
    your_list = list(reader)

trainX = np.array(your_list)

# Single unnamed column of integer class ids (0-3, judging by Dense(4) below).
traindata = pd.read_csv('a/trainlabels.csv', header=None)
Y = traindata.iloc[:, 0]
y_train1 = np.array(Y)
y_train = to_categorical(y_train1)  # one-hot, for categorical_crossentropy

# Pad/truncate every recording to 44100 samples (one second at 44.1 kHz).
maxlen = 44100
trainX = sequence.pad_sequences(trainX, maxlen=maxlen)

print(trainX.shape)
# reshape input to be [samples, time steps, features]
X_train = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))

batch_size = 2

# One conv/pool stage, then a small dense head with a 4-way softmax.
model = Sequential()
model.add(Convolution1D(128, 3, border_mode="same", activation="relu", input_shape=(44100, 1)))
model.add(MaxPooling1D(pool_size=(2)))
#model.add(Convolution1D(256, 6, border_mode="same",activation="relu"))
#model.add(MaxPooling1D(pool_size=(2)))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(4, activation="softmax"))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# NOTE(review): save_best_only tracks TRAINING loss — no validation data is
# passed to fit(), so 'val_loss' would not be available here.
checkpointer = callbacks.ModelCheckpoint(filepath="logs/cnnlayer/checkpoint-{epoch:02d}.hdf5", verbose=1, save_best_only=True, monitor='loss')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=50, callbacks=[checkpointer])
model.save("logs/cnnlayer/lstm1layer_model.hdf5")
86 changes: 86 additions & 0 deletions heartsound/dataseta/ml/trainandtest/lstm.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility

from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Lambda
from keras.layers import Embedding
from keras.layers import Convolution1D,MaxPooling1D, Flatten
from keras.datasets import imdb
from keras import backend as K
from sklearn.cross_validation import train_test_split
import pandas as pd
from keras.utils.np_utils import to_categorical

from sklearn.preprocessing import Normalizer
from keras.models import Sequential
from keras.layers import Convolution1D, Dense, Dropout, Flatten, MaxPooling1D
from keras.utils import np_utils
import numpy as np
import h5py
from keras import callbacks
from keras.layers import LSTM, GRU, SimpleRNN
from keras.callbacks import CSVLogger
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger
import csv
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import cross_val_score
from keras.wrappers.scikit_learn import KerasClassifier

# Train a 4-layer stacked LSTM on the heart-sound train/test split,
# checkpointing on validation accuracy.

# ---- training data -----------------------------------------------------
# Py3 fix: csv.reader needs a text-mode file opened with newline=''
# (the original 'rb' binary mode was a Python-2 idiom).
with open('a/traindata.csv', 'r', newline='') as f:
    reader = csv.reader(f)
    your_list = list(reader)

trainX = np.array(your_list)

# Single unnamed column of integer class ids (0-3, judging by Dense(4) below).
traindata = pd.read_csv('a/trainlabels.csv', header=None)
Y = traindata.iloc[:, 0]
y_train1 = np.array(Y)
y_train = to_categorical(y_train1)  # one-hot, for categorical_crossentropy

# Pad/truncate every recording to 2000 samples.
maxlen = 2000
trainX = sequence.pad_sequences(trainX, maxlen=maxlen)

# reshape input to be [samples, time steps, features]
X_train = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))

# ---- test data ---------------------------------------------------------
with open('a/testdata.csv', 'r', newline='') as f:
    reader1 = csv.reader(f)
    your_list1 = list(reader1)

testX = np.array(your_list1)

testdata = pd.read_csv('a/testlabels.csv', header=None)
Y1 = testdata.iloc[:, 0]
y_test1 = np.array(Y1)
y_test = to_categorical(y_test1)

maxlen = 2000
testX = sequence.pad_sequences(testX, maxlen=maxlen)

# reshape input to be [samples, time steps, features]
X_test = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

batch_size = 5

# The reshape above feeds the whole 2000-sample recording as ONE time step
# of width 2000 (hence input_dim=2000 on the first LSTM).
model = Sequential()
model.add(LSTM(256, input_dim=2000, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(4))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Keep only checkpoints that improve validation accuracy.
checkpointer = callbacks.ModelCheckpoint(filepath="logs/lstm4layer/checkpoint-{epoch:02d}.hdf5", verbose=1, save_best_only=True, monitor='val_acc', mode='max')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=500, validation_data=(X_test, y_test), callbacks=[checkpointer])
model.save("logs/lstm4layer/lstm1layer_model.hdf5")
86 changes: 86 additions & 0 deletions heartsound/dataseta/ml/trainandtest/lstm1.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility

from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Lambda
from keras.layers import Embedding
from keras.layers import Convolution1D,MaxPooling1D, Flatten
from keras.datasets import imdb
from keras import backend as K
from sklearn.cross_validation import train_test_split
import pandas as pd
from keras.utils.np_utils import to_categorical

from sklearn.preprocessing import Normalizer
from keras.models import Sequential
from keras.layers import Convolution1D, Dense, Dropout, Flatten, MaxPooling1D
from keras.utils import np_utils
import numpy as np
import h5py
from keras import callbacks
from keras.layers import LSTM, GRU, SimpleRNN
from keras.callbacks import CSVLogger
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger
import csv
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import cross_val_score
from keras.wrappers.scikit_learn import KerasClassifier

# Train a 3-layer stacked LSTM, checkpointing on training loss.
# NOTE(review): the "training" arrays below are loaded from a/testdata.csv /
# a/testlabels.csv — the SAME files reloaded as the test set further down.
# This looks like a copy-paste slip (the sibling script uses a/traindata.csv);
# preserved as-is — confirm intent before relying on the reported metrics.

# ---- training data (see note above) ------------------------------------
# Py3 fix: csv.reader needs a text-mode file opened with newline=''
# (the original 'rb' binary mode was a Python-2 idiom).
with open('a/testdata.csv', 'r', newline='') as f:
    reader = csv.reader(f)
    your_list = list(reader)

trainX = np.array(your_list)

traindata = pd.read_csv('a/testlabels.csv', header=None)
Y = traindata.iloc[:, 0]
y_train1 = np.array(Y)
y_train = to_categorical(y_train1)  # one-hot, for categorical_crossentropy

# Pad/truncate every recording to 44100 samples (one second at 44.1 kHz).
maxlen = 44100
trainX = sequence.pad_sequences(trainX, maxlen=maxlen)

# reshape input to be [samples, time steps, features]
X_train = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))

# ---- test data ---------------------------------------------------------
with open('a/testdata.csv', 'r', newline='') as f:
    reader1 = csv.reader(f)
    your_list1 = list(reader1)

testX = np.array(your_list1)

testdata = pd.read_csv('a/testlabels.csv', header=None)
Y1 = testdata.iloc[:, 0]
y_test1 = np.array(Y1)
y_test = to_categorical(y_test1)

maxlen = 44100
testX = sequence.pad_sequences(testX, maxlen=maxlen)

# reshape input to be [samples, time steps, features]
X_test = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

batch_size = 2

# The reshape above feeds the whole 44100-sample recording as ONE time step
# of width 44100 (hence input_dim=44100 on the first LSTM).
model = Sequential()
model.add(LSTM(32, input_dim=44100, return_sequences=True))
model.add(Dropout(0.1))
model.add(LSTM(32, return_sequences=True))
model.add(Dropout(0.1))
model.add(LSTM(512, return_sequences=False))
model.add(Dropout(0.1))
#model.add(LSTM(512, return_sequences=False))
#model.add(Dropout(0.1))
model.add(Dense(4))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# save_best_only tracks TRAINING loss here, even though validation data exists.
checkpointer = callbacks.ModelCheckpoint(filepath="logs/lstm3layer/checkpoint-{epoch:02d}.hdf5", verbose=1, save_best_only=True, monitor='loss')
model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), nb_epoch=500, callbacks=[checkpointer])
model.save("logs/lstm3layer/lstm1layer_model.hdf5")
Loading

0 comments on commit 09cc18e

Please sign in to comment.