Fixed normalization scheme for nanoparticle design
samuelkim314 authored and iguanaus committed Feb 21, 2019
1 parent 4aa32e2 commit 02610c9
Showing 4 changed files with 29 additions and 23 deletions.
demo.sh (12 changes: 6 additions & 6 deletions)
@@ -2,22 +2,22 @@
 # This file trains all the models presented here.
 
 echo "python scatter_net.py --data data/8_layer_tio2 --output_folder results/8_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 250 --percent_val .2 --patience 10"
-python scatter_net.py --data data/8_layer_tio2 --output_folder results/8_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 250 --percent_val .2 --patience 10
+#python scatter_net.py --data data/8_layer_tio2 --output_folder results/8_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 250 --percent_val .2 --patience 10
 
 echo "python scatter_net.py --data data/7_layer_tio2 --output_folder results/7_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 225 --percent_val .2 --patience 10"
-python scatter_net.py --data data/7_layer_tio2 --output_folder results/7_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 225 --percent_val .2 --patience 10
+#python scatter_net.py --data data/7_layer_tio2 --output_folder results/7_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 225 --percent_val .2 --patience 10
 
 echo "python scatter_net.py --data data/6_layer_tio2 --output_folder results/6_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 225 --percent_val .2 --patience 10"
-python scatter_net.py --data data/6_layer_tio2 --output_folder results/6_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 225 --percent_val .2 --patience 10
+#python scatter_net.py --data data/6_layer_tio2 --output_folder results/6_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 225 --percent_val .2 --patience 10
 
 echo "python scatter_net.py --data data/5_layer_tio2 --output_folder results/5_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 200 --percent_val .2 --patience 10"
 python scatter_net.py --data data/5_layer_tio2 --output_folder results/5_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 200 --percent_val .2 --patience 10
 
 echo "python scatter_net.py --data data/4_layer_tio2 --output_folder results/4_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 125 --percent_val .2 --patience 10"
-python scatter_net.py --data data/4_layer_tio2 --output_folder results/4_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 125 --percent_val .2 --patience 10
+#python scatter_net.py --data data/4_layer_tio2 --output_folder results/4_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 125 --percent_val .2 --patience 10
 
 echo "python scatter_net.py --data data/3_layer_tio2 --output_folder results/3_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 100 --percent_val .2 --patience 10"
-python scatter_net.py --data data/3_layer_tio2 --output_folder results/3_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 100 --percent_val .2 --patience 10
+#python scatter_net.py --data data/3_layer_tio2 --output_folder results/3_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 100 --percent_val .2 --patience 10
 
 echo "python scatter_net.py --data data/2_layer_tio2 --output_folder results/2_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 100 --percent_val .2 --patience 10"
-python scatter_net.py --data data/2_layer_tio2 --output_folder results/2_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 100 --percent_val .2 --patience 10
+#python scatter_net.py --data data/2_layer_tio2 --output_folder results/2_layer_tio2 --n_batch 100 --numEpochs 5000 --lr_rate .0006 --lr_decay .99 --num_layers 4 --n_hidden 100 --percent_val .2 --patience 10
results/5_layer_tio2/spec_file_0.txt (2 changes: 2 additions & 0 deletions)
@@ -0,0 +1,2 @@
+50.0280975,50.02805,49.9758875,49.94867,50.0279125
+11.5489555277,11.5311451815,11.5590914365,11.55339027,11.5392245252
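
Judging by the diff to scatter_net.py below, this new file stores the normalization statistics for the five layer thicknesses: the first row is read back as the per-feature mean and the second row as the per-feature standard deviation. A minimal sketch of how the design script appears to consume it (path and rounded values for illustration only):

import numpy as np

# spec_file_0.txt: row 0 = per-layer mean thickness, row 1 = per-layer std.
stats = np.loadtxt("results/5_layer_tio2/spec_file_0.txt", delimiter=",")
x_mean, x_std = stats[0, :], stats[1, :]
print(x_mean)  # approx. [50.03 50.03 49.98 49.95 50.03]
print(x_std)   # approx. [11.55 11.53 11.56 11.55 11.54]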
scatter_net.py (22 changes: 13 additions & 9 deletions)
@@ -54,6 +54,10 @@ def gen_data_first(data,test_file,data_folder=None):
 def design_spectrum(data,reuse_weights,output_folder,weight_name_save,weight_name_load,n_batch,numEpochs,lr_rate,lr_decay,num_layers,n_hidden,percent_val,patienceLimit,compare,sample_val,spect_to_sample,matchSpectrum,match_test_file,designSpectrum,design_test_file):
     print(num_layers)
     train_X, train_Y, max_val, min_val = gen_data_first(data,design_test_file)
+    spec_file_name = output_folder + "/spec_file_0" + ".txt"
+    data = np.loadtxt(spec_file_name, delimiter=",")
+    x_mean = data[0, :]
+    x_std = data[1, :]
 
     x_size = train_X.shape[1] # Number of input nodes: 4 features and 1 bias
     y_size = train_Y.shape[1] # Number of outcomes (3 iris flowers)
@@ -62,27 +66,27 @@ def design_spectrum(...)
     X = tf.get_variable(name="b1", initializer=init_list_rand)
     y = tf.placeholder("float", shape=[None, y_size])
     print(num_layers)
-    weights, biases = load_weights(output_folder,weight_name_load,num_layers)
+    weights, biases = load_weights(output_folder, weight_name_load, num_layers)
     print(num_layers)
     print(type(num_layers))
     #This is the lambda list
     lambdaList = range(400,801,2)
     myl = np.array(lambdaList)
     newL = 1.0/(myl*myl*3).astype(np.float32)
     # Forward propagation
-    yhat = forwardprop(X, weights,biases,num_layers,minLimit=30,maxLimit=70)
+    X_norm = (X - x_mean) / x_std
+    yhat = forwardprop(X_norm, weights,biases, num_layers, minLimit=(30-x_mean)/x_std, maxLimit=(70-x_mean)/x_std)
     # This will scale by the wavelength
     yhat = tf.multiply(yhat,newL)
     # Backward propagation
-    topval = tf.abs(tf.matmul(y,tf.transpose(tf.abs(yhat)))) #This will select all the values that we want.
-    botval = tf.abs(tf.matmul(tf.abs(y-1),tf.transpose(tf.abs(yhat)))) #This will get the values that we do not want.
+    topval = tf.abs(tf.matmul(y,tf.transpose(tf.abs(yhat)))) # This will select all the values that we want.
+    botval = tf.abs(tf.matmul(tf.abs(y-1),tf.transpose(tf.abs(yhat)))) # This will get the values that we do not want.
     cost = topval/botval#botval/topval#topval#/botval
-    #optimizer = tf.train.RMSPropOptimizer(learning_rate=lr_rate, decay=lr_decay).minimize(cost,var_list=[X])
+    # optimizer = tf.train.RMSPropOptimizer(learning_rate=lr_rate, decay=lr_decay).minimize(cost,var_list=[X])
     global_step = tf.Variable(0, trainable=False)
 
 
-    learning_rate = tf.train.exponential_decay(lr_rate,global_step,1000,lr_decay,staircase=False)
-    optimizer = tf.train.RMSPropOptimizer(learning_rate=lr_rate).minimize(cost,global_step=global_step,var_list=[X])
+    learning_rate = tf.train.exponential_decay(lr_rate,global_step,1000,lr_decay, staircase=False)
+    optimizer = tf.train.RMSPropOptimizer(learning_rate=lr_rate).minimize(cost, global_step=global_step, var_list=[X])
 
     # Run SGD
     with tf.Session() as sess:
@@ -99,7 +103,7 @@ def design_spectrum(...)

         sess.run(optimizer, feed_dict={y: train_Y})
         loss = sess.run(cost,feed_dict={y:train_Y})
-        cum_loss += loss
+        cum_loss += loss
         step += 1
         cost_file.write(str(float(cum_loss))+str("\n"))
         print("Step: " + str(step) + " : Loss: " + str(cum_loss) + " : " + str(X.eval()))
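The substance of the fix: the design variable X lives in physical thickness units (nm), but the pretrained network expects standardized inputs, so X is now mapped through (X - x_mean) / x_std before the forward pass, and the 30 and 70 nm clamp limits are pushed through the same transform so that clamping still corresponds to the physical bounds. A standalone NumPy sketch of the scheme, with illustrative statistics standing in for the TensorFlow graph:

import numpy as np

def normalize(x, mean, std):
    # The same affine map is applied to the design variable and to its limits.
    return (x - mean) / std

x_mean = np.array([50.03, 50.03, 49.98, 49.95, 50.03])  # from spec_file_0.txt
x_std = np.array([11.55, 11.53, 11.56, 11.55, 11.54])

x = np.array([55.0, 42.0, 61.0, 38.0, 50.0])  # candidate thicknesses in nm
lo = normalize(30.0, x_mean, x_std)           # normalized lower limit
hi = normalize(70.0, x_mean, x_std)           # normalized upper limit

# Clamping in normalized space with the transformed limits is equivalent to
# clamping the raw thicknesses to [30, 70] nm before standardizing; the old
# code clamped to [30, 70] but fed the unnormalized X into the network.
x_norm = np.clip(normalize(x, x_mean, x_std), lo, hi)
print(x_norm)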
scatter_net_core.py (16 changes: 8 additions & 8 deletions)
@@ -41,17 +41,17 @@ def load_weights(output_folder,weight_load_name,num_layers):
         biases.append(b_i)
     return weights , biases
 
-def forwardprop(X, weights, biases, num_layers,dropout=False,minLimit=None,maxLimit=None):
-    if (minLimit != None):
-        X = tf.maximum(X,minLimit)
-        X = tf.minimum(X,maxLimit)
+def forwardprop(X, weights, biases, num_layers, dropout=False, minLimit=None, maxLimit=None):
+    if minLimit is not None:
+        X = tf.maximum(X, minLimit)
+        X = tf.minimum(X, maxLimit)
     htemp = None
     for i in xrange(0, num_layers):
         if i ==0:
-            htemp = tf.nn.relu(tf.add(tf.matmul(X,weights[i]),biases[i]))
-        else:
-            htemp = tf.nn.relu(tf.add(tf.matmul(htemp,weights[i]),biases[i]))
-    yval = tf.add(tf.matmul(htemp,weights[-1]),biases[-1])
+            htemp = tf.nn.relu(tf.add(tf.matmul(X, weights[i]), biases[i]))
+        else:
+            htemp = tf.nn.relu(tf.add(tf.matmul(htemp, weights[i]), biases[i]))
+    yval = tf.add(tf.matmul(htemp, weights[-1]), biases[-1])
     return yval
 
 #This method reads from the 'X' and 'Y' file and gives in the input as an array of arrays (aka if the input dim is 5 and there are 10 training sets, the input is a 10X 5 array)
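A likely reason for replacing "!= None" with "is not None" here: after the change in scatter_net.py, minLimit and maxLimit are NumPy arrays rather than Python scalars, and comparing an array to None with != is elementwise, producing a boolean array whose truth value is ambiguous inside an if statement. An identity test avoids this. A small demonstration with illustrative values:

import numpy as np

min_limit = np.array([-1.73, -1.74, -1.73])  # e.g. (30 - x_mean) / x_std

try:
    if min_limit != None:  # elementwise comparison -> boolean array
        pass
except ValueError as err:
    print(err)  # the truth value of an array with more than one element is ambiguous

if min_limit is not None:  # identity test works for scalars and arrays alike
    print("clamp limits are set")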
