save

1 parent 678b15d commit a9290b082288d3b314e9105f27effa1c617d7071 @matthieu637 committed Apr 26, 2012
@@ -10,7 +10,7 @@
from multilayerp import MultilayerPerceptron
from random import shuffle
import matplotlib.pyplot as plt
-from data import DataFile
+from data import DataFile, DataFileR
from mpl_toolkits.mplot3d import Axes3D
from utils import index_max
@@ -34,39 +34,50 @@ def discretis(ll, nbDiscretized=nbDiscre):
nbr_network = 1
momentum = 0.9
lrate = 0.1
- nbShape = 10
+ nbShape = 3
nbEpoch = 1000
+ nbTry = 10
display_interval = range(nbEpoch)[::6]
+ #create inputs/outputs to learn
+# examples = DataFile("digit_shape.txt", mode)
+
+ examples = DataFileR("iris.txt")
+ momentum = 0.2
+
+ nbInputs = len(examples.inputs[0])
+ nbHidden = 5
+ nbOutputs = len(examples.outputs[0])
+ nbShape = nbOutputs
+
#create all networks
networks = [{} for _ in range(nbr_network)]
for i in range(nbr_network):
- first_order = MultilayerPerceptron(20, 5, 10, learning_rate=lrate, momentum=momentum, grid=mode)
- high_order_10 = MultilayerPerceptron(5, 10, 35, learning_rate=lrate, momentum=momentum, grid=mode)
+ first_order = MultilayerPerceptron(nbInputs, nbHidden, nbOutputs, learning_rate=lrate, momentum=momentum, grid=mode)
+ high_order_10 = MultilayerPerceptron(nbHidden, nbHidden*2, nbInputs+nbHidden+nbOutputs, learning_rate=lrate, momentum=momentum, grid=mode)
networks[i] = {'first_order' : first_order,
'high_order_10' : high_order_10}
- #create inputs/outputs to learn
- examples = DataFile("digit_shape.txt", mode)
- learned = [[0 for _ in range(10)] for _ in range(nbDiscre**5)]
+
+ learned = [[0 for _ in range(nbShape)] for _ in range(nbDiscre**nbHidden)]
#3 curves
dis = [[0 for _ in range(nbEpoch)] for _ in range(nbShape)]
dis2 = [[0 for _ in range(nbEpoch)] for _ in range(nbShape)]
div = [[0 for _ in range(nbEpoch)] for _ in range(nbShape)]
- valid = [[] for _ in range(10)]
+ valid = [[] for _ in range(nbShape)]
#learning
for epoch in range(nbEpoch):
for network in networks:
- l_exx = list(range(nbShape))
+ l_exx = list(range(len(examples.inputs)))
shuffle(l_exx)
- for ex in l_exx:
+ for ex in l_exx[0:nbTry]:
#RMS
network['first_order'].calc_output(examples.inputs[ex])
@@ -76,9 +87,12 @@ def discretis(ll, nbDiscretized=nbDiscre):
network['high_order_10'].calc_output(network['first_order'].stateHiddenNeurons)
- learned[discretis(network['first_order'].stateHiddenNeurons)][ex] += 1
+
im = index_max(examples.outputs[ex])
+
+ learned[discretis(network['first_order'].stateHiddenNeurons)][im] += 1
+
div[im][epoch] += 1
dis[im][epoch] += discretis(network['first_order'].stateHiddenNeurons)
dis2[im][epoch] += index_max(network['first_order'].stateOutputNeurons)
@@ -97,7 +111,7 @@ def discretis(ll, nbDiscretized=nbDiscre):
print(epoch)
#divided by the number of networks
- for i in range(10):
+ for i in range(nbShape):
for j in range(nbEpoch):
if(div[i][j] != 0):
dis[i][j] /= div[i][j]
@@ -107,7 +121,7 @@ def discretis(ll, nbDiscretized=nbDiscre):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
- for j in range(10):
+ for j in range(nbShape):
ax.scatter([dis[j][k] for k in valid[j]], [j] * len(valid[j]), valid[j], color=colors[j], marker='x')
ax.set_xlabel('DISCRETIZED VALUE')
@@ -118,7 +132,7 @@ def discretis(ll, nbDiscretized=nbDiscre):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
- for j in range(10):
+ for j in range(nbShape):
ax.scatter([dis2[j][k] for k in valid[j]], [j] * len(valid[j]), valid[j], color=colors[j], marker='x')
ax.set_xlabel('DISCRETIZED VALUE')
@@ -147,19 +161,19 @@ def discretis(ll, nbDiscretized=nbDiscre):
for j in range(nbShape):
plt.title('shape :%d' % j)
plt.plot(valid[j], [dis[j][k] for k in valid[j]], '.', label="hidden")
- plt.plot(valid[j], [dis2[j][k]*(nbDiscre**5)/10 for k in valid[j]], '.', label="output")
+ plt.plot(valid[j], [dis2[j][k]*(nbDiscre**nbHidden)/nbShape for k in valid[j]], '.', label="output")
plt.legend(loc='best', frameon=False)
plt.show()
stade = 0
for i in range(len(learned)):
r = max(learned[i])
- if(r > 10):
+ if(r > nbShape):
cl = list(learned[i])
cl.remove(r)
- if(max(cl) > 10):
+ if(max(cl) > nbShape):
stade += 1
- plt.bar(range(stade*12+10)[stade*12::], learned[i])
+ plt.bar(range(stade*12+nbShape)[stade*12::], learned[i])
plt.show()
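
The hunks above for the first script replace the hard-coded 20-5-10 digit topology with sizes derived from the loaded data set. A minimal sketch of that sizing pattern, assuming the repository's DataFileR loader and the MultilayerPerceptron constructor behave exactly as they are used in this diff (the mode constant is taken from the other scripts in the same commit):

from data import DataFileR
from multilayerp import MultilayerPerceptron

mode = MultilayerPerceptron.R0to1          # as used by the other scripts in this commit
lrate, momentum = 0.1, 0.2

# derive the topology from the data file instead of hard-coding 20 / 5 / 10
examples = DataFileR("iris.txt")
nbInputs = len(examples.inputs[0])         # 4 features for iris
nbHidden = 5
nbOutputs = len(examples.outputs[0])       # 3 classes for iris

first_order = MultilayerPerceptron(nbInputs, nbHidden, nbOutputs,
                                   learning_rate=lrate, momentum=momentum, grid=mode)
# the higher-order network reads the hidden states and reconstructs
# [inputs | hidden states | outputs] of the first-order network
high_order_10 = MultilayerPerceptron(nbHidden, nbHidden * 2, nbInputs + nbHidden + nbOutputs,
                                     learning_rate=lrate, momentum=momentum, grid=mode)
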
@@ -10,12 +10,12 @@
from multilayerp import MultilayerPerceptron
from random import shuffle
import matplotlib.pyplot as plt
-from data import DataFile
+from data import DataFile, DataFileR
from utils import index_max, compare, compare_f
if __name__ == '__main__':
mode = MultilayerPerceptron.R0to1
- nbr_network = 3
+ nbr_network = 5
momentum = 0.9
lrate = 0.1
nbEpoch = 1000
@@ -24,22 +24,30 @@
display_interval2 = [0, 25, 50, 100, 200, 500, 999]
+ #create inputs/outputs to learn
+# examples = DataFile("digit_shape_16.txt")
+# examples = DataFile("digit_handwritten_16.txt")
+ examples = DataFileR("iris.txt")
+
+ nbInput = len(examples.inputs[0])
+# nbHidden = 16 * 4
+ nbHidden = 5
+ nbOutput = len(examples.outputs[0])
+
#create all networks
networks = [{} for _ in range(nbr_network)]
for i in range(nbr_network):
- first_order = MultilayerPerceptron(16 * 16, 16 * 4, 10, learning_rate=lrate, momentum=momentum, grid=mode)
- high_order_10 = MultilayerPerceptron(16 * 4, 16 * 4 * 2, 16 * 16 + 16 * 4 + 10, learning_rate=lrate, momentum=momentum, grid=mode)
- high_order_5 = MultilayerPerceptron(16 * 4, 16 * 4, 16 * 16 + 16 * 4 + 10, learning_rate=lrate, momentum=momentum, grid=mode)
+ first_order = MultilayerPerceptron(nbInput, nbHidden, nbOutput, learning_rate=lrate, momentum=momentum, grid=mode)
+ high_order_10 = MultilayerPerceptron(nbHidden, nbHidden * 2, nbInput + nbHidden + nbOutput, learning_rate=lrate, momentum=momentum, grid=mode)
+ high_order_5 = MultilayerPerceptron(nbHidden, nbHidden, nbInput + nbHidden + nbOutput, learning_rate=lrate, momentum=momentum, grid=mode)
networks[i] = {'first_order' : first_order,
'high_order_10' : high_order_10,
'high_order_5':high_order_5}
- #create inputs/outputs to learn
-# examples = DataFile("digit_shape_16.txt")
- examples = DataFile("digit_handwritten_16.txt")
+
#3 curves
rms_plot = {'first_order' : [] ,
@@ -83,11 +91,11 @@
if(index_max(network['first_order'].stateOutputNeurons) != index_max(examples.outputs[ex])):
err_one_network['first_order'] += 1
- err_one_network['high_order_20'] += 1 - compare(examples.inputs[ex], network['high_order_10'].stateOutputNeurons[0:16 * 16])
- if( not compare_f(network['first_order'].stateHiddenNeurons,
- network['high_order_10'].stateOutputNeurons[16 * 16:16 * 16 + 16 * 4], 0.3) ):
+ err_one_network['high_order_20'] += 1 - compare(examples.inputs[ex], network['high_order_10'].stateOutputNeurons[0:nbInput])
+ if(not compare_f(network['first_order'].stateHiddenNeurons,
+ network['high_order_10'].stateOutputNeurons[nbInput:nbInput + nbHidden], 0.3)):
err_one_network['high_order_5'] += 1
- if(index_max(network['high_order_10'].stateOutputNeurons[16 * 16 + 16 * 4:16 * 16 + 16 * 4 + 10]) !=
+ if(index_max(network['high_order_10'].stateOutputNeurons[nbInput + nbHidden:nbInput + nbHidden + nbOutput]) !=
index_max(network['first_order'].stateOutputNeurons)):
err_one_network['high_order_10'] += 1
@@ -99,9 +107,9 @@
#add plot
- rms_plot['first_order'].append(sum_rms['first_order']/ (nbTry * nbr_network))
- rms_plot['high_order_10'].append(sum_rms['high_order_10']/ (nbTry * nbr_network))
- rms_plot['high_order_5'].append(sum_rms['high_order_5']/ (nbTry * nbr_network))
+ rms_plot['first_order'].append(sum_rms['first_order'] / (nbTry * nbr_network))
+ rms_plot['high_order_10'].append(sum_rms['high_order_10'] / (nbTry * nbr_network))
+ rms_plot['high_order_5'].append(sum_rms['high_order_5'] / (nbTry * nbr_network))
err_plot['first_order'].append(err_one_network['first_order'] / (nbTry * nbr_network))
err_plot['high_order_10'].append(err_one_network['high_order_10'] / (nbTry * nbr_network))
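
In the second script, the error bookkeeping now slices the higher-order network's output by the derived sizes instead of the literal 16*16 / 16*4 / 10 offsets. A small sketch of that slicing on plain lists; score_reconstruction is an illustrative helper, not a repository function, while compare, compare_f and index_max are the utils helpers imported above:

from utils import index_max, compare, compare_f

def score_reconstruction(inputs, hidden, first_order_out, reconstruction):
    """Split the higher-order output into [inputs | hidden | outputs] and score each part."""
    nbInput, nbHidden, nbOutput = len(inputs), len(hidden), len(first_order_out)
    inp_rec = reconstruction[0:nbInput]
    hid_rec = reconstruction[nbInput:nbInput + nbHidden]
    out_rec = reconstruction[nbInput + nbHidden:nbInput + nbHidden + nbOutput]
    return {
        # reconstruction error on the raw inputs
        'high_order_20': 1 - compare(inputs, inp_rec),
        # 1 if the reconstructed hidden states are not within 0.3 of the real ones
        'high_order_5': 0 if compare_f(hidden, hid_rec, 0.3) else 1,
        # 1 if the reconstructed decision disagrees with the first-order decision
        'high_order_10': 0 if index_max(out_rec) == index_max(first_order_out) else 1,
    }
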
@@ -14,31 +14,38 @@
import matplotlib.pyplot as plt
from data import DataFile, DataFileR
+
+
if __name__ == '__main__':
mode = MultilayerPerceptron.R0to1
nbr_network = 5
momentum = 0.5
lrate = 0.15
- nbEpoch = 600
+ nbEpoch = 400
nbTry = 20
display_interval = range(nbEpoch)[::6]
+
+ #create inputs/outputs to learn
+ examples = DataFileR("iris.txt", mode)
+# examples = DataFile("digit_handwritten_16.txt", mode)
+
+ nbInputs = len(examples.inputs[0])
+ nbOutputs = len(examples.outputs[0])
+
+
#create all networks
networks = [{} for _ in range(nbr_network)]
for i in range(nbr_network):
- first_order = [PerceptronR0to1(4, learning_rate=lrate, momentum=momentum,
- temperature=1., init_w_randomly=True, enable_bias=True) for _ in range(3)]
- high_order_10 = MultilayerPerceptron(4, 40, 3, learning_rate=lrate, momentum=momentum, grid=mode)
-
+ first_order = [PerceptronR0to1(nbInputs, learning_rate=lrate, momentum=momentum,
+ temperature=1., init_w_randomly=True, enable_bias=True) for _ in range(nbOutputs)]
+ high_order_10 = MultilayerPerceptron(nbInputs, 3+nbInputs//4, nbOutputs, learning_rate=lrate, momentum=momentum, grid=mode)
+ high_order_10.init_weights_randomly(-1, 1)
networks[i] = {'first_order' : first_order,
'high_order_10' : high_order_10}
-
- #create inputs/outputs to learn
- examples = DataFileR("iris.txt", mode)
-# examples = DataFile("digit_handwritten_16.txt", mode)
-
+
#3 curves
err_plot = {'first_order' : [] ,
'high_order_10' : []}
@@ -53,32 +60,19 @@
shuffle(l_exx)
for ex in l_exx[0:nbTry]:
resf1 = [network['first_order'][i].calc_output(examples.inputs[ex])
- for i in range(3)]
+ for i in range(nbOutputs)]
network['high_order_10'].calc_output(examples.inputs[ex])
-
-# ww = []
-#
-# for i in range(10):
-# ww.extend(network['first_order'][i].weights)
-#
-# resh = [network['high_order_10'][i].calc_output(ww)
-# for i in range(10)]
-
+
if(index_max(resf1) != index_max(examples.outputs[ex])):
err_one_network['first_order'] += 1
if(index_max(network['high_order_10'].stateOutputNeurons) != index_max(examples.outputs[ex])):
err_one_network['high_order_10'] += 1
-# if(index_max(resh) != index_max(resf1)):
-# err_one_network['high_order_10'] += 1
- #learn
-# for i in range(10):
-# network['high_order_10'][i].train(ww, resf1[i])
network['high_order_10'].train(examples.inputs[ex],
examples.outputs[ex])
- for i in range(3):
+ for i in range(nbOutputs):
network['first_order'][i].train(examples.inputs[ex],
examples.outputs[ex][i])
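
The third script replaces the fixed three PerceptronR0to1 units with one unit per output component and classifies with index_max over their activations. A condensed sketch of that one-unit-per-class loop; the import path for PerceptronR0to1 is an assumption (it is not visible in these hunks), everything else follows the calls shown above:

from perceptron import PerceptronR0to1     # assumed module name; class as used above
from multilayerp import MultilayerPerceptron
from data import DataFileR
from utils import index_max
from random import shuffle

mode = MultilayerPerceptron.R0to1
lrate, momentum, nbTry = 0.15, 0.5, 20

examples = DataFileR("iris.txt", mode)
nbInputs = len(examples.inputs[0])
nbOutputs = len(examples.outputs[0])

# one simple perceptron per class, each trained on a single target component
units = [PerceptronR0to1(nbInputs, learning_rate=lrate, momentum=momentum,
                         temperature=1., init_w_randomly=True, enable_bias=True)
         for _ in range(nbOutputs)]

errors = 0
order = list(range(len(examples.inputs)))
shuffle(order)
for ex in order[0:nbTry]:
    scores = [units[i].calc_output(examples.inputs[ex]) for i in range(nbOutputs)]
    if index_max(scores) != index_max(examples.outputs[ex]):   # winner-take-all decision
        errors += 1
    for i in range(nbOutputs):
        units[i].train(examples.inputs[ex], examples.outputs[ex][i])
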
@@ -156,12 +156,12 @@ def ampli(l, n):
networks = [{} for _ in range(nbr_network)]
for i in range(nbr_network):
- control = MultilayerPerceptron(16 * 16, 100, 10, learning_rate=0.15, momentum=momentum, grid=mode,
- temperature=1, random=False, enable_bias=True)
+ seed(i)
+ control = MultilayerPerceptron(16 * 16, 100, 10, learning_rate=0.15, momentum=momentum, grid=mode)
control.init_weights_randomly(-1, 1)
- first_order = AdHock(control)
high_order_h = MultilayerPerceptron(100, 20, 2, learning_rate=0.1, momentum=0., grid=mode)
+ first_order = AdHock(control)
# high_order_h.init_weights_randomly(-1, 1)
networks[i] = {'first_order' : first_order,
@@ -179,6 +179,8 @@ def ampli(l, n):
'control': [],
'diff': []}
+ seed(100)
+
#learning
for epoch in range(nbEpoch):
perfo = {'first_order' : 0. ,
@@ -240,9 +242,9 @@ def ampli(l, n):
print("score : ", sum(y_perfo['diff']) / len(y_perfo['diff']))
plt.title("Feedback by merging")
- plt.plot(display_interval , y_perfo['first_order'][3::5], label="first-order network", linewidth=1)
- plt.plot(display_interval , y_perfo['high_order_h'][3::5], label="high-order network (high learning rate)")
- plt.plot(display_interval , y_perfo['wager_proportion'][3::5], label="proportion of high wagers")
+# plt.plot(display_interval , y_perfo['first_order'][3::5], label="first-order network", linewidth=1)
+# plt.plot(display_interval , y_perfo['high_order_h'][3::5], label="high-order network (high learning rate)")
+# plt.plot(display_interval , y_perfo['wager_proportion'][3::5], label="proportion of high wagers")
plt.plot(display_interval , y_perfo['control'][3::5], label="control network", linewidth=2)
plt.plot(display_interval , y_perfo['feedback'][3::5], label="feedback", linewidth=2)
plt.ylabel('SUCCESS RATIO')
@@ -8,7 +8,7 @@
from multilayerp import MultilayerPerceptron
from utils import index_max, index_max_nth
-from random import shuffle
+from random import shuffle, seed
import matplotlib.pyplot as plt
from data import DataFile
@@ -24,13 +24,11 @@
networks = [{} for _ in range(nbr_network)]
for i in range(nbr_network):
+ seed(i)
first_order = MultilayerPerceptron(16 * 16, 100, 10, learning_rate=0.15, momentum=momentum, grid=mode)
- high_order_h = MultilayerPerceptron(100, 100, 10, learning_rate=0.1, momentum=0.5, grid=mode)
-
-
first_order.init_weights_randomly(-1, 1)
- high_order_h.init_weights_randomly(-1, 1)
+ high_order_h = MultilayerPerceptron(100, 100, 10, learning_rate=0.1, momentum=0.5, grid=mode)
networks[i] = {'first_order' : first_order,
'high_order_h' : high_order_h}
@@ -49,6 +47,8 @@
max2 = [0 for _ in range(nbEpoch)]
div = [0 for _ in range(nbEpoch)]
+ seed(100)
+
#learning
for epoch in range(nbEpoch):
perfo = {'first_order' : 0. ,
@@ -74,7 +74,7 @@
if(index_max(network['first_order'].stateOutputNeurons) == index_max(examples.outputs[ex])):
perfo['first_order'] += 1
max2[epoch] += max(network['first_order'].stateOutputNeurons)
- print(max(network['first_order'].stateOutputNeurons))
+# print(max(network['first_order'].stateOutputNeurons))
div[epoch] += 1
perfo['max'] += max(network['first_order'].stateOutputNeurons)
@@ -172,11 +172,11 @@
plt.title("Performance of first-order and higher-order networks with feedback ( nth W-T-A )")
plt.plot(display_interval , y_perfo['first_order'][3::5], label="first-order network", linewidth=2)
- plt.plot(display_interval , y_perfo['wager_proportion'][3::5], label="proportion of high wagers")
+# plt.plot(display_interval , y_perfo['wager_proportion'][3::5], label="proportion of high wagers")
plt.plot(display_interval , y_perfo['feedback'][3::5], label="feedback", linewidth=2)
- plt.plot(display_interval , y_perfo['high_order_h'][3::5], label="high-order network (high learning rate)")
- plt.plot(display_interval , y_perfo['max'][3::5], label="most active neuron", linewidth=2)
- plt.plot(display_interval , max2[3::5], label="most active neuron (good answer)", linewidth=2)
+# plt.plot(display_interval , y_perfo['high_order_h'][3::5], label="high-order network (high learning rate)")
+# plt.plot(display_interval , y_perfo['max'][3::5], label="most active neuron", linewidth=2)
+# plt.plot(display_interval , max2[3::5], label="most active neuron (good answer)", linewidth=2)
plt.ylabel('SUCCESS RATIO')
plt.xlabel("EPOCHS")
plt.axis((0, nbEpoch, 0, 1.))
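
The last two files add explicit seeding: seed(i) before building network i, so the random weight initialisation is repeatable per network, and seed(100) before the learning loop, so the example shuffling is identical across runs. A minimal sketch of that pattern with the repository's MultilayerPerceptron; the data loading and the momentum value sit outside these hunks and are placeholders here:

from random import seed, shuffle
from multilayerp import MultilayerPerceptron

mode = MultilayerPerceptron.R0to1
momentum = 0.9                      # placeholder; the actual value is outside these hunks

networks = []
for i in range(5):
    seed(i)                         # network i always starts from the same random weights
    first_order = MultilayerPerceptron(16 * 16, 100, 10,
                                       learning_rate=0.15, momentum=momentum, grid=mode)
    first_order.init_weights_randomly(-1, 1)
    networks.append(first_order)

seed(100)                           # the shuffles in the training loop are now reproducible
example_order = list(range(200))    # placeholder for list(range(len(examples.inputs)))
shuffle(example_order)
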