
Commit 52e72c3
Using a two-convolutional-layer neural network and setting w0=3000A.
daniel-muthukrishna committed May 28, 2018
1 parent 2f26326 commit 52e72c3
Showing 5 changed files with 9 additions and 10 deletions.
dash/create_and_save_all_data_files.py (6 changes: 3 additions & 3 deletions)
@@ -12,7 +12,7 @@


 if __name__ == '__main__':
-    modelName = 'zeroZ_trainOnAll_morehostfracs'
+    modelName = 'zeroZ_trainOnAll_w0-3000'
     trainWithHost = True
     classifyHost = False
     minZ = 0.
@@ -38,9 +38,9 @@
         f.write("Num of Redshifts: {}\n".format(numOfRedshifts))
         f.write("Fraction of Training Set Used: {}\n".format(numOfRedshifts))
         f.write("Training Amount: 50 x 500000\n")
-        f.write("Changed wavelength range to 3000 to 10000A\n")
+        f.write("Changed wavelength range to 3500 to 10000A\n")
         f.write("Set outer region to 0.5\n")
-        f.write("Added 3rd convolutional layer to neural network\n")
+        f.write("Using 2 convolutional layers in neural network\n")
     dataFilenames.append(modelInfoFilename)
 
     # CREATE PARAMETERS PICKLE FILE
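This hunk just updates the human-readable sidecar file that records each run's settings next to the training data. A minimal sketch of that provenance-log pattern, with a hypothetical filename and hard-coded values standing in for the variables used above:

modelInfoFilename = 'model_info.txt'  # hypothetical name for this sketch
with open(modelInfoFilename, 'w') as f:
    f.write("Wavelength range: 3500 to 10000A\n")  # matches the updated range logged above
    f.write("Convolutional layers: 2\n")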
dash/create_arrays.py (3 changes: 3 additions & 0 deletions)
@@ -146,10 +146,13 @@ def shuffle_arrays(self, memmapName='', **kwargs):
             arrayShuf = np.memmap('shuffled_{}_{}_{}.dat'.format(key, memmapName, self.randnum), dtype=object, mode='w+', shape=arraySize)
             kwargShuf[key] = arrayShuf
 
+        print("Shuffling...")
         # Randomise order
         p = np.random.permutation(len(kwargs['labels']))
         for key in kwargs:
             assert len(kwargs[key]) == arraySize
+            print(key, "shuffling...")
+            print(len(p))
             kwargShuf[key] = kwargs[key][p]
 
         return kwargShuf
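The hunk only adds progress printing, but the surrounding logic is worth a note: every array is shuffled with the same permutation p, so the spectra and their labels stay aligned row for row. A minimal standalone sketch of that technique, with toy in-memory arrays in place of the memmap-backed ones above:

import numpy as np

images = np.arange(12).reshape(6, 2)   # toy stand-in for the spectra array
labels = np.array([0, 1, 0, 1, 0, 1])  # toy stand-in for the labels array

p = np.random.permutation(len(labels))           # one shared permutation
images_shuf, labels_shuf = images[p], labels[p]  # rows stay paired after the shuffle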
dash/deep_learning_multilayer.py (6 changes: 1 addition & 5 deletions)
@@ -76,11 +76,7 @@ def train_model(dataDirName, overwrite=False):
     a = []
     x, y_, keep_prob, y_conv = convnet_variables(imWidth, imWidthReduc, N, nLabels)
 
-    config = tf.ConfigProto()
-    config.intra_op_parallelism_threads = 44
-    config.inter_op_parallelism_threads = 44
-
-    with tf.Session(config=config) as sess:  # config=tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)) as sess:
+    with tf.Session() as sess:  # config=tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)) as sess:
         # TRAIN AND EVALUATE MODEL
         cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv + 1e-8), reduction_indices=[1]))
         train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
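Two details are easy to miss here: dropping the ConfigProto thread settings simply falls back to TensorFlow's default thread pools, and the 1e-8 inside tf.log guards against log(0) when the softmax output saturates. A minimal sketch of that guard (TF1-style API, matching this repo; the toy tensors are made up):

import tensorflow as tf  # TF1-style API, as used in this repo

y_true = tf.constant([[0., 1.], [1., 0.]])    # one-hot labels
y_pred = tf.constant([[0., 1.], [0.9, 0.1]])  # a saturated softmax output

# Without the epsilon, 0 * log(0) evaluates to NaN and poisons the mean.
loss = tf.reduce_mean(-tf.reduce_sum(y_true * tf.log(y_pred + 1e-8), reduction_indices=[1]))
with tf.Session() as sess:
    print(sess.run(loss))  # finite (about 0.05) thanks to the 1e-8 guard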
dash/multilayer_convnet.py (2 changes: 1 addition & 1 deletion)
@@ -70,7 +70,7 @@ def convnet_variables(imWidth, imWidthReduc, N, ntypes):
     h_fc2 = Layer3.connect_layers(h_pool3, 64, 2)
 
     # READOUT LAYER
-    keep_prob, h_fc2_drop = Layer3.dropout(h_fc2)
+    keep_prob, h_fc2_drop = Layer3.dropout(h_fc1)
     W_fc3, b_fc3 = Layer3.readout_layer()
     y_conv = tf.nn.softmax(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)
 
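Routing the dropout input from h_fc2 back to h_fc1 means the readout now sees the first fully connected layer's activations (the h_fc2_drop name is kept, so h_fc2 now appears to be computed but unused). A minimal sketch of the dropout-then-softmax readout pattern this code follows (TF1-style, with made-up layer sizes):

import tensorflow as tf  # TF1-style API

h_fc1 = tf.placeholder(tf.float32, [None, 64])  # stand-in for the fully connected activations
keep_prob = tf.placeholder(tf.float32)          # feed < 1.0 when training, 1.0 when evaluating
h_drop = tf.nn.dropout(h_fc1, keep_prob)        # randomly zeroes units to regularise

W = tf.Variable(tf.truncated_normal([64, 17], stddev=0.1))  # 17 matches nTypes below
b = tf.Variable(tf.constant(0.1, shape=[17]))
y_conv = tf.nn.softmax(tf.matmul(h_drop, W) + b)            # per-class probabilities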
dash/training_parameters.py (2 changes: 1 addition & 1 deletion)
@@ -8,7 +8,7 @@ def create_training_params_file(dataDirName):
                   'Ib-norm', 'Ibn', 'IIb', 'Ib-pec', 'Ic-norm', 'Ic-broad',
                   'Ic-pec', 'IIP', 'IIL', 'IIn', 'II-pec'],
         'nTypes': 17,
-        'w0': 3000.,  # wavelength range in Angstroms
+        'w0': 3500.,  # wavelength range in Angstroms
         'w1': 10000.,
         'nw': 1024,  # number of wavelength bins
         'minAge': -20.,
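Here w0, w1, and nw define the grid that every spectrum is rebinned onto before classification. A minimal sketch of how such a grid could be constructed, assuming SNID-style log-wavelength binning (the actual scheme lives in dash's preprocessing code):

import numpy as np

w0, w1, nw = 3500., 10000., 1024  # values from the parameters above

# Log-spaced bins give constant velocity resolution across the range.
dwlog = np.log(w1 / w0) / nw
wave = w0 * np.exp(np.arange(nw) * dwlog)  # wave[0] == w0, wave[-1] just below w1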
