
Update after the fire.

haehn committed Jun 26, 2018
1 parent 7d379e2 commit a005e2bf3fde1bde1af3f6068c34ef824efd51bf
Showing with 2,961 additions and 130 deletions.
  1. BIN .DS_Store
  2. +339 −0 EXP/run_position_length_from_scratch_more_data.py
  3. +348 −0 EXP/run_regression_from_scratch_multi.py
  4. +163 −0 IPY/Figure1_From_Scratch_MULTI_SLURMS.ipynb
  5. +39 −9 IPY/Figure1_From_Scratch_SLURMS.ipynb
  6. +1,783 −0 IPY/UserStudy.ipynb
  7. +3 −3 SLURMS/Figure1_From_Scratch/C.Figure1.angle_0_VGG19_True.sbatch
  8. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.angle_0_XCEPTION_True.sbatch
  9. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.angle_1_VGG19_True.sbatch
  10. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.angle_1_XCEPTION_True.sbatch
  11. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.angle_2_VGG19_True.sbatch
  12. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.angle_2_XCEPTION_True.sbatch
  13. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.area_0_VGG19_True.sbatch
  14. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.area_0_XCEPTION_True.sbatch
  15. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.area_1_VGG19_True.sbatch
  16. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.area_1_XCEPTION_True.sbatch
  17. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.area_2_VGG19_True.sbatch
  18. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.area_2_XCEPTION_True.sbatch
  19. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.curvature_0_VGG19_True.sbatch
  20. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.curvature_0_XCEPTION_True.sbatch
  21. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.curvature_1_VGG19_True.sbatch
  22. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.curvature_1_XCEPTION_True.sbatch
  23. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.curvature_2_VGG19_True.sbatch
  24. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.curvature_2_XCEPTION_True.sbatch
  25. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.curvature_3_VGG19_True.sbatch
  26. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.curvature_3_XCEPTION_True.sbatch
  27. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.direction_0_VGG19_True.sbatch
  28. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.direction_0_XCEPTION_True.sbatch
  29. +28 −0 SLURMS/Figure1_From_Scratch/C.Figure1.direction_10_VGG19_True_9.sbatch
  30. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.direction_1_VGG19_True.sbatch
  31. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.direction_1_XCEPTION_True.sbatch
  32. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.direction_2_VGG19_True.sbatch
  33. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.direction_2_XCEPTION_True.sbatch
  34. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.length_0_VGG19_True.sbatch
  35. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.length_0_XCEPTION_True.sbatch
  36. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.length_1_VGG19_True.sbatch
  37. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.length_1_XCEPTION_True.sbatch
  38. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.length_2_VGG19_True.sbatch
  39. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.length_2_XCEPTION_True.sbatch
  40. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.length_3_VGG19_True.sbatch
  41. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.length_3_XCEPTION_True.sbatch
  42. +28 −0 SLURMS/Figure1_From_Scratch/C.Figure1.multi_10_VGG19_True_1.sbatch
  43. +28 −0 SLURMS/Figure1_From_Scratch/C.Figure1.multi_10_VGG19_True_9.sbatch
  44. +28 −0 SLURMS/Figure1_From_Scratch/C.Figure1.multi_10_XCEPTION_True_1.sbatch
  45. +28 −0 SLURMS/Figure1_From_Scratch/C.Figure1.multi_10_XCEPTION_True_9.sbatch
  46. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_common_scale_0_VGG19_True.sbatch
  47. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_common_scale_0_XCEPTION_True.sbatch
  48. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_common_scale_1_VGG19_True.sbatch
  49. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_common_scale_1_XCEPTION_True.sbatch
  50. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_common_scale_2_VGG19_True.sbatch
  51. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_common_scale_2_XCEPTION_True.sbatch
  52. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_non_aligned_scale_0_VGG19_True.sbatch
  53. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_non_aligned_scale_0_XCEPTION_True.sbatch
  54. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_non_aligned_scale_1_VGG19_True.sbatch
  55. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_non_aligned_scale_1_XCEPTION_True.sbatch
  56. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_non_aligned_scale_2_VGG19_True.sbatch
  57. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_non_aligned_scale_2_XCEPTION_True.sbatch
  58. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_non_aligned_scale_3_VGG19_True.sbatch
  59. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.position_non_aligned_scale_3_XCEPTION_True.sbatch
  60. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.shading_0_VGG19_True.sbatch
  61. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.shading_0_XCEPTION_True.sbatch
  62. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.shading_1_VGG19_True.sbatch
  63. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.shading_1_XCEPTION_True.sbatch
  64. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.shading_2_VGG19_True.sbatch
  65. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.shading_2_XCEPTION_True.sbatch
  66. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.volume_0_VGG19_True.sbatch
  67. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.volume_0_XCEPTION_True.sbatch
  68. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.volume_1_VGG19_True.sbatch
  69. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.volume_1_XCEPTION_True.sbatch
  70. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.volume_2_VGG19_True.sbatch
  71. +2 −2 SLURMS/Figure1_From_Scratch/C.Figure1.volume_2_XCEPTION_True.sbatch
  72. +28 −0 SLURMS/Figure4_From_Scratch/C.Figure4.multi_VGG19_more_data.sbatch
BIN .DS_Store
Binary file not shown.
+339 −0 EXP/run_position_length_from_scratch_more_data.py
@@ -0,0 +1,339 @@
from keras import models
from keras import layers
from keras import optimizers
import keras.applications
import keras.callbacks
from keras import backend as K
from keras.utils.np_utils import to_categorical
import sklearn.metrics
import cPickle as pickle
import numpy as np
import os
import sys
import time
import ClevelandMcGill as C
EXPERIMENT = sys.argv[1]  # e.g. 'C.Figure4.data_to_type1' or 'C.Figure4.multi'
CLASSIFIER = sys.argv[2]  # e.g. 'VGG19' or 'XCEPTION'
NOISE = sys.argv[3]  # 'True' or 'False'
JOB_INDEX = int(sys.argv[4])
#
#
#
print 'Running', EXPERIMENT, 'with', CLASSIFIER, 'Noise:', NOISE, 'Job Index', JOB_INDEX
#
#
# PROCESS SOME FLAGS
#
#
SUFFIX = '_fivetimes.'
if NOISE == 'True':
    NOISE = True
    SUFFIX = '_fivetimes_noise.'
else:
    NOISE = False
if EXPERIMENT == 'C.Figure4.multi':
    #
    # one classifier with all different types
    #
    def DATATYPE(data):
        '''Render the datapoint with a randomly picked Figure 4 encoding (type 1-5).'''
        choices = ['C.Figure4.data_to_type1', 'C.Figure4.data_to_type2',
                   'C.Figure4.data_to_type3', 'C.Figure4.data_to_type4', 'C.Figure4.data_to_type5']
        choice = np.random.choice(choices)
        return eval(choice)(data)
else:
    DATATYPE = eval(EXPERIMENT)
if os.path.expanduser('~').startswith('/n/'):
    # we are on the cluster
    PREFIX = '/n/regal/pfister_lab/PERCEPTION/'
else:
    PREFIX = '/home/d/PERCEPTION/'
RESULTS_DIR = PREFIX + 'RESULTS_FROM_SCRATCH/'
OUTPUT_DIR = RESULTS_DIR + EXPERIMENT + '/' + CLASSIFIER + '/'
if not os.path.exists(OUTPUT_DIR):
    # there can be a race condition here
    try:
        os.makedirs(OUTPUT_DIR)
    except:
        print 'Race condition!', os.path.exists(OUTPUT_DIR)
STATSFILE = OUTPUT_DIR + str(JOB_INDEX).zfill(2) + SUFFIX + 'p'
MODELFILE = OUTPUT_DIR + str(JOB_INDEX).zfill(2) + SUFFIX + 'h5'
print 'Working in', OUTPUT_DIR
print 'Storing', STATSFILE
print 'Storing', MODELFILE
if os.path.exists(STATSFILE) and os.path.exists(MODELFILE):
    print 'WAIT A MINUTE!! WE HAVE DONE THIS ONE BEFORE!'
    sys.exit(0)
#
#
# DATA GENERATION
#
#
train_counter = 0
val_counter = 0
test_counter = 0
train_target = 500000
val_target = 100000
test_target = 100000
train_labels = []
val_labels = []
test_labels = []
X_train = np.zeros((train_target, 100, 100), dtype=np.float32)
y_train = np.zeros((train_target, 5), dtype=np.float32)
X_val = np.zeros((val_target, 100, 100), dtype=np.float32)
y_val = np.zeros((val_target, 5), dtype=np.float32)
X_test = np.zeros((test_target, 100, 100), dtype=np.float32)
y_test = np.zeros((test_target, 5), dtype=np.float32)
t0 = time.time()
all_counter = 0
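# Each generated datapoint is randomly assigned to one of three "pots"
# (train / validation / test); a label that already landed in a pot is
# forced back into that same pot so the splits never share labels.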
while train_counter < train_target or val_counter < val_target or test_counter < test_target:

    all_counter += 1

    data, label = C.Figure4.generate_datapoint()

    pot = np.random.choice(3)

    # if this label was seen before, keep it in the same pot
    if label in train_labels:
        pot = 0
    if label in val_labels:
        pot = 1
    if label in test_labels:
        pot = 2

    if pot == 0 and train_counter < train_target:

        try:
            image = DATATYPE(data)
            image = image.astype(np.float32)
        except:
            continue

        if label not in train_labels:
            train_labels.append(label)

        # add noise?
        if NOISE:
            image += np.random.uniform(0, 0.05, (100, 100))

        # safe to add to training
        X_train[train_counter] = image
        y_train[train_counter] = label
        train_counter += 1

    elif pot == 1 and val_counter < val_target:

        try:
            image = DATATYPE(data)
            image = image.astype(np.float32)
        except:
            continue

        if label not in val_labels:
            val_labels.append(label)

        # add noise?
        if NOISE:
            image += np.random.uniform(0, 0.05, (100, 100))

        # safe to add to validation
        X_val[val_counter] = image
        y_val[val_counter] = label
        val_counter += 1

    elif pot == 2 and test_counter < test_target:

        try:
            image = DATATYPE(data)
            image = image.astype(np.float32)
        except:
            continue

        if label not in test_labels:
            test_labels.append(label)

        # add noise?
        if NOISE:
            image += np.random.uniform(0, 0.05, (100, 100))

        # safe to add to test
        X_test[test_counter] = image
        y_test[test_counter] = label
        test_counter += 1
print 'Done', time.time()-t0, 'seconds (', all_counter, 'iterations)'
#
#
#
#
#
# NORMALIZE DATA IN-PLACE (BUT SEPARATELY)
#
#
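# The min/max of the *training* images and labels are used to rescale all
# three splits to [0, 1]; the images are then shifted by -0.5 so pixel
# values end up in [-0.5, 0.5].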
X_min = X_train.min()
X_max = X_train.max()
y_min = y_train.min()
y_max = y_train.max()
# scale in place
X_train -= X_min
X_train /= (X_max - X_min)
y_train -= y_min
y_train /= (y_max - y_min)
X_val -= X_min
X_val /= (X_max - X_min)
y_val -= y_min
y_val /= (y_max - y_min)
X_test -= X_min
X_test /= (X_max - X_min)
y_test -= y_min
y_test /= (y_max - y_min)
# normalize to -.5 .. .5
X_train -= .5
X_val -= .5
X_test -= .5
print 'memory usage', (X_train.nbytes + X_val.nbytes + X_test.nbytes + y_train.nbytes + y_val.nbytes + y_test.nbytes) / 1000000., 'MB'
#
#
#
#
#
# FEATURE GENERATION
#
#
feature_time = 0
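# VGG19 / Xception expect 3-channel inputs, so the grayscale 100x100 images
# are replicated along a new last axis; with include_top=False the network's
# convolutional base serves as the feature generator for the MLP head below.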
if CLASSIFIER == 'VGG19' or CLASSIFIER == 'XCEPTION':
    X_train_3D = np.stack((X_train,)*3, -1)
    X_val_3D = np.stack((X_val,)*3, -1)
    X_test_3D = np.stack((X_test,)*3, -1)
    print 'memory usage', (X_train_3D.nbytes + X_val_3D.nbytes + X_test_3D.nbytes) / 1000000., 'MB'

if CLASSIFIER == 'VGG19':
    feature_generator = keras.applications.VGG19(include_top=False, input_shape=(100,100,3))
elif CLASSIFIER == 'XCEPTION':
    feature_generator = keras.applications.Xception(include_top=False, input_shape=(100,100,3))
elif CLASSIFIER == 'RESNET50':
    print 'Not yet - we need some padding and so on!!!'
    sys.exit(1)
t0 = time.time()
#
# THE MLP
#
#
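# Regression head: flatten the convolutional features, one 256-unit ReLU
# layer with dropout, and 5 linear outputs (one per target value); the head
# is attached end-to-end to the feature generator.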
MLP = models.Sequential()
MLP.add(layers.Flatten(input_shape=feature_generator.output_shape[1:]))
MLP.add(layers.Dense(256, activation='relu', input_dim=(100,100,3)))
MLP.add(layers.Dropout(0.5))
MLP.add(layers.Dense(5, activation='linear')) # REGRESSION
model = keras.Model(inputs=feature_generator.input, outputs=MLP(feature_generator.output))
sgd = optimizers.SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['mse', 'mae']) # MSE for regression
#
#
# TRAINING
#
#
t0 = time.time()
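# Train for up to 1000 epochs (batch size 32) with early stopping on the
# validation loss (patience 10); ModelCheckpoint keeps only the best model
# in MODELFILE.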
callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='auto'),
             keras.callbacks.ModelCheckpoint(MODELFILE, monitor='val_loss', verbose=1, save_best_only=True, mode='min')]

history = model.fit(X_train_3D,
                    y_train,
                    epochs=1000,
                    batch_size=32,
                    validation_data=(X_val_3D, y_val),
                    callbacks=callbacks,
                    verbose=True)
fit_time = time.time()-t0
print 'Fitting done', time.time()-t0
#
#
# PREDICTION
#
#
y_pred = model.predict(X_test_3D)
#
#
# CLEVELAND MCGILL ERROR
# MEANS OF LOG ABSOLUTE ERRORS (MLAEs)
#
MLAE = np.log2(sklearn.metrics.mean_absolute_error(y_pred*100, y_test*100)+.125)
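# MLAE: predictions and targets are rescaled to the 0-100 range before taking
# the mean absolute error; the 0.125 offset (presumably Cleveland-McGill's
# log2(|error| + 1/8)) keeps the log finite when the error is zero.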
#
#
# STORE
# (THE NETWORK IS ALREADY STORED BASED ON THE CALLBACK FROM ABOVE!)
#
stats = dict(history.history)
# 1. the training history
# 2. the y_pred and y_test values
# 3. the MLAE
stats['time'] = feature_time + fit_time
stats['y_test'] = y_test
stats['y_pred'] = y_pred
stats['MLAE'] = MLAE
with open(STATSFILE, 'w') as f:
    pickle.dump(stats, f)
print 'MLAE', MLAE
print 'Written', STATSFILE
print 'Written', MODELFILE
print 'Sayonara! All done here.'