Commit

Final version for running experiments for DCASE 2018. Contains configuration for using full development datasets
sidrah-liaqat committed Jul 24, 2018
1 parent 6319eb7 commit 7ca9d71
Showing 1 changed file with 31 additions and 22 deletions.
53 changes: 31 additions & 22 deletions birddet.py
@@ -33,7 +33,7 @@
 ################################################
 
 #checking mfc features
-SPECTPATH = '/audio/audio/dataexchange/win_32ms/'
+SPECTPATH = '/audio/audio/dataexchange/80fbanks/win_12ms/'
 #SPECTPATH = '/audio/audio/workingfiles/spect/'
 #SPECTPATH = '/home/sidrah/DL/bulbul2018/workingfiles/spect/'
 #SPECTPATH = 'C:\Sidrah\DCASE2018\dataset\spect\'
@@ -55,29 +55,31 @@
 #FILELIST = 'C:\Sidrah\DCASE2018\dataset\filelists'
 # create this directory in main project directory
 
+RESULTPATH = 'backup/mfc_model/80fbanks/win_12ms/all3/'
 dataset = ['BirdVox-DCASE-20k.csv', 'ff1010bird.csv', 'warblrb10k.csv']
 #dataset = (['Chernobyl', 'PolandNFC', 'warblrb10k-eval'])
 #features =['h5','mfc']
-logfile_name = 'backup/mfc_model/win_32ms/trainedonwarblrb/with_clswt/logfile.log'
-checkpoint_model_name = 'backup/mfc_model/win_32ms/trainedonwarblrb/with_clswt/ckpt.h5'
-final_model_name = 'backup/mfc_model/win_32ms/trainedonwarblrb/with_clswt/flmdl.h5'
+logfile_name = RESULTPATH + 'logfile.log'
+checkpoint_model_name = RESULTPATH + 'ckpt.h5'
+final_model_name = RESULTPATH + 'flmdl.h5'
 
 BATCH_SIZE = 16
-EPOCH_SIZE = 8
-AUGMENT_SIZE = 8
+EPOCH_SIZE = 40
+AUGMENT_SIZE = 1
 with_augmentation = False
 features='mfc'
 model_operation = 'new'
 # model_operations : 'new', 'load', 'test'
-shape = (624, 160)
-#shape = (1669, 160)
-expected_shape = (624, 160)
+#shape = (624, 160)
+shape = (1669, 80)
+expected_shape = (1669, 80)
 spect = np.zeros(shape)
 label = np.zeros(1)
 
 # Callbacks for logging during epochs
 reduceLR = ReduceLROnPlateau(factor=0.2, patience=5, min_lr=0.00001)
-checkPoint = ModelCheckpoint(filepath = checkpoint_model_name, save_best_only=True) # monitor = 'val_acc', mode = 'max'
+checkPoint = ModelCheckpoint(filepath = checkpoint_model_name, monitor= 'val_acc', mode = 'max', save_best_only=True)
 csvLogger = CSVLogger(logfile_name, separator=',', append=False)
 #earlyStopping = EarlyStopping(patience=5)
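
The hunk above switches the checkpoint to track validation accuracy explicitly. For context, a minimal sketch of how these three callbacks would typically be wired into Keras 2.x training; the fit_generator call, generator names, and step counts below are illustrative assumptions, not lines from this commit:

# Hedged sketch, not part of this commit: passing the callbacks defined
# above into Keras 2.x training. Generator names and sizes are assumed.
model.fit_generator(train_generator,
                    steps_per_epoch=int(train_size / BATCH_SIZE),
                    epochs=EPOCH_SIZE,
                    validation_data=validation_generator,
                    validation_steps=int(val_size / BATCH_SIZE),
                    class_weight=training_set[k_CLASS_WEIGHT],
                    callbacks=[reduceLR, checkPoint, csvLogger])

Note that monitor='val_acc' only takes effect if the model is compiled with metrics=['accuracy'] and validation data is supplied; otherwise the checkpoint has no such metric to track.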

@@ -106,19 +108,26 @@
 d_warblr = {k_VAL_FILE: 'val_W', k_TEST_FILE: 'test_W', k_TRAIN_FILE: 'train_W',
             k_VAL_SIZE: 400.0, k_TEST_SIZE: 1200.0, k_TRAIN_SIZE: 6400.0,
             k_CLASS_WEIGHT: {0: 0.75, 1: 0.25}}
+            #k_CLASS_WEIGHT: {0: 0.5, 1: 0.5}}
 d_freefield = {k_VAL_FILE: 'val_F', k_TEST_FILE: 'test_F', k_TRAIN_FILE: 'train_F',
                k_VAL_SIZE: 385.0, k_TEST_SIZE: 1153.0, k_TRAIN_SIZE: 6152.0,
                k_CLASS_WEIGHT: {0: 0.25, 1: 0.75}}
+               #k_CLASS_WEIGHT: {0: 0.5, 1: 0.5}}
-d_fold1 = {k_VAL_FILE: 'test_W', k_TEST_FILE: 'val_1', k_TRAIN_FILE: 'train_W',
-           k_CLASS_WEIGHT: {0: 0.25, 1: 0.75}}
+d_fold1 = {k_VAL_FILE: 'test_BF', k_TEST_FILE: 'val_1', k_TRAIN_FILE: 'train_BF',
+           k_VAL_SIZE: 4153.0, k_TEST_SIZE: 8000.0, k_TRAIN_SIZE: 22152.0,
+           k_CLASS_WEIGHT: {0: 0.43, 1: 0.57}}
+d_fold2 = {k_VAL_FILE: 'test_WF', k_TEST_FILE: 'val_2', k_TRAIN_FILE: 'train_WF',
+           k_VAL_SIZE: 2353.0, k_TEST_SIZE: 20000.0, k_TRAIN_SIZE: 12552.0,
+           k_CLASS_WEIGHT: {0: 0.50, 1: 0.50}}
+d_fold3 = {k_VAL_FILE: 'test_BW', k_TEST_FILE: 'val_3', k_TRAIN_FILE: 'train_BW',
+           k_VAL_SIZE: 4200.0, k_TEST_SIZE: 7690.0, k_TRAIN_SIZE: 22400.0,
+           k_CLASS_WEIGHT: {0: 0.57, 1: 0.43}}
+d_all3 = {k_VAL_FILE: 'val_BWF', k_TEST_FILE: 'test', k_TRAIN_FILE: 'train_BWF',
+          k_VAL_SIZE: 1785.0, k_TEST_SIZE: 12620.0, k_TRAIN_SIZE: 33905.0,
+          k_CLASS_WEIGHT: {0: 0.50, 1: 0.50}}
 # Declare the training, validation, and testing sets here using the dictionaries defined above.
 # Set these variables to change the data set.
-training_set = d_warblr
-validation_set = d_warblr
-test_set = d_warblr
+training_set = d_all3
+validation_set = d_all3
+test_set = d_all3
 
 # Grab the file lists and sizes from the corresponding data sets.
 train_filelist = FILELIST + training_set[k_TRAIN_FILE]
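
The class-weight dictionaries above follow one consistent pattern: each class is weighted by the share of the opposite class, so the rarer label contributes more to the loss (warblrb10k is mostly positive, ff1010bird mostly negative, and the three-way combination comes out near balanced). A hedged sketch of that derivation; the helper below is hypothetical, not code from this commit:

# Hypothetical helper (an assumption about how the weights were derived):
# weight class 0 by the positive fraction and class 1 by the negative
# fraction, so the minority class is up-weighted.
def derive_class_weights(n_negative, n_positive):
    total = float(n_negative + n_positive)
    return {0: n_positive / total, 1: n_negative / total}

For example, a roughly 75%-positive training set yields {0: 0.75, 1: 0.25}, matching d_warblr above.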
@@ -185,7 +194,7 @@ def data_generator(filelistpath, batch_size=16, shuffle=False):
 htk_reader = HTKFile()
 htk_reader.load(SPECTPATH + file_id[:-4] + '.mfc')
 imagedata = np.array(htk_reader.data)
-imagedata = imagedata / 18.0
+imagedata = imagedata / 17.0
 
 imagedata = np.reshape(imagedata, (1, imagedata.shape[0], imagedata.shape[1], 1))

@@ -256,7 +265,7 @@ def dataval_generator(filelistpath, batch_size=32, shuffle=False):
 htk_reader = HTKFile()
 htk_reader.load(SPECTPATH + file_id[:-4] + '.mfc')
 imagedata = np.array(htk_reader.data)
-imagedata = imagedata/18.0
+imagedata = imagedata/17.0
 
 # processing files with shapes other than expected shape in warblr dataset
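
The comment above flags warblr recordings whose feature matrices differ from expected_shape; the handling itself sits below this hunk and is not shown. A minimal sketch of one plausible approach, zero-padding short clips and truncating long ones (an assumption, not the commit's actual code):

# Hypothetical shape fix-up (assumed, not shown in this diff):
if imagedata.shape != expected_shape:
    fixed = np.zeros(expected_shape, dtype=imagedata.dtype)
    t = min(imagedata.shape[0], expected_shape[0])  # time frames
    f = min(imagedata.shape[1], expected_shape[1])  # filterbank channels
    fixed[:t, :f] = imagedata[:t, :f]
    imagedata = fixed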

@@ -331,7 +340,7 @@ def testdata(filelistpath, test_size):
 
 file_id = filenames[image_index].rstrip()
 
-label_batch[image_index, :] = labels_dict[file_id]
+label_batch[image_index, :] = labels_dict[file_id[:-4]]
 
 outputs = [label_batch]

@@ -367,7 +376,7 @@ def testdata(filelistpath, test_size):
 #preprocessing_function
 
 # convolution layers
-    model.add(Conv2D(16, (3, 3), padding='valid', input_shape=(624, 160, 1), )) # low: try different kernel_initializer
+    model.add(Conv2D(16, (3, 3), padding='valid', input_shape=(1669, 80, 1), )) # low: try different kernel_initializer
     model.add(BatchNormalization()) # explore order of Batchnorm and activation
     model.add(LeakyReLU(alpha=.001))
     model.add(MaxPooling2D(pool_size=(3, 3))) # experiment with using smaller pooling along frequency axis
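
Only the first convolution block is visible in this hunk; the updated input_shape is simply the new feature shape (1669, 80) with a trailing channel axis. As a hedged sketch, the Conv2D -> BatchNormalization -> LeakyReLU -> MaxPooling2D pattern shown above is the kind of block such networks repeat; the helper and its parameters below are illustrative assumptions, not read from this diff:

# Illustrative only: the block pattern implied by the hunk above, wrapped
# in a hypothetical helper. Filter counts and pool sizes are assumptions.
def add_conv_block(model, filters, pool=(3, 3)):
    model.add(Conv2D(filters, (3, 3), padding='valid'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=.001))
    model.add(MaxPooling2D(pool_size=pool))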
@@ -398,7 +407,7 @@ def testdata(filelistpath, test_size):
     model.add(Dense(1, activation='sigmoid'))
 
 elif model_operation == 'load' or model_operation == 'test':
-    model = load_model('backup/mfc_model/win_32ms/trainedonwarblr/with_clswt/ckpt.h5')
+    model = load_model(RESULTPATH + 'flmdl.h5')
 
 if model_operation == 'new' or model_operation == 'load':
     adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
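
The diff ends before the compile call. A hedged guess at the adjacent line, consistent with the single sigmoid output and with the checkpoint's monitor='val_acc' above (which requires an accuracy metric to exist):

# Assumed continuation, not shown in this diff: binary cross-entropy suits
# the one-unit sigmoid output, and metrics=['accuracy'] gives the
# ModelCheckpoint's monitor='val_acc' a value to track.
model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])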
