Skip to content

Commit

Permalink
iris relearn
Browse files Browse the repository at this point in the history
  • Loading branch information
matthieu637 committed May 3, 2012
1 parent 8121428 commit 5c95bee
Showing 1 changed file with 70 additions and 62 deletions.
132 changes: 70 additions & 62 deletions src/articles/digit_reco/handwritten_stop_relearn.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,58 +11,63 @@
from random import shuffle
from perceptron import PerceptronR0to1
import matplotlib.pyplot as plt
from data import DataFile
from data import DataFile, DataFileR
from utils import index_max
from copy import deepcopy
from random import seed


def newtask(l, n_outputs=10):
    """Mirror a one-hot target vector in place.

    The single active output at index ``i`` is cleared and the output at
    index ``n_outputs - 1 - i`` is activated instead.

    ``n_outputs`` generalizes the original hard-coded 10-class layout
    (``l[9 - imax]``) and defaults to 10, so existing callers behave
    exactly as before.
    """
    # First index holding the maximum, i.e. the currently active class.
    imax = l.index(max(l))
    l[imax] = 0.
    l[n_outputs - 1 - imax] = 1.

def newtask2(l, n_outputs=10):
    """Rotate a one-hot target vector forward in place.

    The active output at index ``i`` moves to ``i + 1``, wrapping from the
    last output back to 0.  ``n_outputs`` replaces the original hard-coded
    ``9``/``0`` wraparound and defaults to 10 for backward compatibility.
    The new active value is written as ``1.`` for consistency with
    ``newtask`` (the original wrote the int ``1``; numerically identical).
    """
    imax = l.index(max(l))  # first index of the maximum: the active class
    l[imax] = 0.
    l[(imax + 1) % n_outputs] = 1.

def newtask3(l, n_outputs=10):
    """Rotate a one-hot target vector backward in place.

    The active output at index ``i`` moves to ``i - 1``, wrapping from 0 to
    the last output.  ``n_outputs`` replaces the original hard-coded
    ``0``/``9`` wraparound and defaults to 10 for backward compatibility.
    The new active value is written as ``1.`` for consistency with
    ``newtask`` (the original wrote the int ``1``; numerically identical).
    """
    imax = l.index(max(l))  # first index of the maximum: the active class
    l[imax] = 0.
    # Python's % yields a non-negative result, so -1 % n == n - 1.
    l[(imax - 1) % n_outputs] = 1.


if __name__ == '__main__':
mode = MultilayerPerceptron.R0to1
nbr_network = 3
nbr_network = 10
momentum = 0.5
lrate = 0.1
nbEpoch = 3600
nbTry = 10
display_interval = range(nbEpoch)[::50]
seed(10)

#create all networks
networks = [{} for _ in range(nbr_network)]

#create inputs/outputs to learn
# examples = DataFile("digit_handwritten_16.txt", mode)
examples = DataFileR("iris.txt")

nbInputs = len(examples.inputs[0])
nbHidden = 3 + nbInputs//4
nbOutputs = len(examples.outputs[0])

def newtask(l):
imax = index_max(l)
l[imax] = 0.
l[nbOutputs - 1 - imax] = 1.

def newtask2(l):
imax = index_max(l)
l[imax] = 0.
imax = imax + 1 if imax != nbOutputs - 1 else 0
l[imax] = 1

def newtask3(l):
imax = index_max(l)
l[imax] = 0.
imax = imax - 1 if imax != 0 else nbOutputs - 1
l[imax] = 1

for i in range(nbr_network):
first_order = MultilayerPerceptron(16 * 16, 16 * 4, 10, learning_rate=lrate, momentum=momentum, grid=mode)
high_order_10 = MultilayerPerceptron(16 * 4, 16 * 4 * 2, 16 * 16 + 16 * 4 + 10, learning_rate=lrate, momentum=momentum, grid=mode)
first_order = MultilayerPerceptron(nbInputs, nbHidden, nbOutputs, learning_rate=lrate, momentum=momentum, grid=mode)
high_order_10 = MultilayerPerceptron(nbHidden, nbHidden * 2, nbInputs+ nbHidden+ nbOutputs, learning_rate=lrate, momentum=momentum, grid=mode)
control1 = deepcopy(first_order)
control2 = deepcopy(high_order_10)
perceptron = [PerceptronR0to1(16 * 16, lrate, momentum) for _ in range(10)]
perceptron = [PerceptronR0to1(nbInputs, lrate, momentum) for _ in range(nbOutputs)]

networks[i] = {'first_order' : first_order,
'high_order_10' : high_order_10,
'first_order_control': control1,
'high_order_control':control2,
'perceptron' : perceptron}

#create inputs/outputs to learn
examples = DataFile("digit_handwritten_16.txt", mode)

#3 curves
err_plot = {'first_order' : [] ,
'high_order_10' : [],
Expand All @@ -71,7 +76,7 @@ def newtask3(l):
'perceptron' : []}

#learning
for epoch in range(600):
for epoch in range(1000):
err_one_network = {'first_order' : 0. ,
'high_order_10' : 0.,
'first_order_control': 0.,
Expand Down Expand Up @@ -101,7 +106,7 @@ def newtask3(l):
network['high_order_control'].calc_output(
network['first_order_control'].stateHiddenNeurons)

res = [network['perceptron'][i].calc_output(examples.inputs[ex]) for i in range(10)]
res = [network['perceptron'][i].calc_output(examples.inputs[ex]) for i in range(nbOutputs)]

if(index_max(res) != index_max(examples.outputs[ex])):
err_one_network['perceptron'] += 1
Expand All @@ -112,11 +117,11 @@ def newtask3(l):
if(index_max(network['first_order_control'].stateOutputNeurons) != index_max(examples.outputs[ex])):
err_one_network['first_order_control'] += 1

if(index_max(network['high_order_10'].stateOutputNeurons[16 * 16 + 16 * 4:16 * 16 + 16 * 4 + 10]) !=
if(index_max(network['high_order_10'].stateOutputNeurons[nbInputs + nbHidden:nbInputs + nbHidden+ nbOutputs]) !=
index_max(network['first_order'].stateOutputNeurons)):
err_one_network['high_order_10'] += 1

if(index_max(network['high_order_control'].stateOutputNeurons[16 * 16 + 16 * 4:16 * 16 + 16 * 4 + 10]) !=
if(index_max(network['high_order_control'].stateOutputNeurons[nbInputs + nbHidden:nbInputs + nbHidden+ nbOutputs]) !=
index_max(network['first_order_control'].stateOutputNeurons)):
err_one_network['high_order_control'] += 1

Expand All @@ -131,7 +136,7 @@ def newtask3(l):
network['first_order_control'].train(examples.inputs[ex],
examples.outputs[ex])

[network['perceptron'][i].train(examples.inputs[ex], examples.outputs[ex][i]) for i in range(10)]
[network['perceptron'][i].train(examples.inputs[ex], examples.outputs[ex][i]) for i in range(nbOutputs)]


#add plot
Expand All @@ -149,6 +154,7 @@ def newtask3(l):
print(examples.outputs[0])


#learning
for epoch in range(1000):
err_one_network = {'first_order' : 0. ,
'high_order_10' : 0.,
Expand Down Expand Up @@ -178,8 +184,8 @@ def newtask3(l):

network['high_order_control'].calc_output(
network['first_order_control'].stateHiddenNeurons)

res = [network['perceptron'][i].calc_output(examples.inputs[ex]) for i in range(10)]
res = [network['perceptron'][i].calc_output(examples.inputs[ex]) for i in range(nbOutputs)]

if(index_max(res) != index_max(examples.outputs[ex])):
err_one_network['perceptron'] += 1
Expand All @@ -190,11 +196,11 @@ def newtask3(l):
if(index_max(network['first_order_control'].stateOutputNeurons) != index_max(examples.outputs[ex])):
err_one_network['first_order_control'] += 1

if(index_max(network['high_order_10'].stateOutputNeurons[16 * 16 + 16 * 4:16 * 16 + 16 * 4 + 10]) !=
if(index_max(network['high_order_10'].stateOutputNeurons[nbInputs + nbHidden:nbInputs + nbHidden+ nbOutputs]) !=
index_max(network['first_order'].stateOutputNeurons)):
err_one_network['high_order_10'] += 1

if(index_max(network['high_order_control'].stateOutputNeurons[16 * 16 + 16 * 4:16 * 16 + 16 * 4 + 10]) !=
if(index_max(network['high_order_control'].stateOutputNeurons[nbInputs + nbHidden:nbInputs + nbHidden+ nbOutputs]) !=
index_max(network['first_order_control'].stateOutputNeurons)):
err_one_network['high_order_control'] += 1

Expand All @@ -204,12 +210,13 @@ def newtask3(l):
network['high_order_control'].train(network['first_order_control'].stateHiddenNeurons,
entire_first_order2)

network['first_order_control'].train(examples.inputs[ex], examples.outputs[ex])
[network['perceptron'][i].train(examples.inputs[ex], examples.outputs[ex][i]) for i in range(10)]
for i in range(len(examples.outputs[ex])):
network['first_order'].outputNeurons[i].train(network['first_order'].stateHiddenNeurons,
examples.outputs[ex][i])
network['first_order'].train(examples.inputs[ex],
examples.outputs[ex])
network['first_order_control'].train(examples.inputs[ex],
examples.outputs[ex])

[network['perceptron'][i].train(examples.inputs[ex], examples.outputs[ex][i]) for i in range(nbOutputs)]


#add plot
for k in err_plot.keys() :
Expand All @@ -226,6 +233,7 @@ def newtask3(l):
print(examples.outputs[0])


#learning
for epoch in range(1000):
err_one_network = {'first_order' : 0. ,
'high_order_10' : 0.,
Expand Down Expand Up @@ -255,8 +263,8 @@ def newtask3(l):

network['high_order_control'].calc_output(
network['first_order_control'].stateHiddenNeurons)

res = [network['perceptron'][i].calc_output(examples.inputs[ex]) for i in range(10)]
res = [network['perceptron'][i].calc_output(examples.inputs[ex]) for i in range(nbOutputs)]

if(index_max(res) != index_max(examples.outputs[ex])):
err_one_network['perceptron'] += 1
Expand All @@ -267,11 +275,11 @@ def newtask3(l):
if(index_max(network['first_order_control'].stateOutputNeurons) != index_max(examples.outputs[ex])):
err_one_network['first_order_control'] += 1

if(index_max(network['high_order_10'].stateOutputNeurons[16 * 16 + 16 * 4:16 * 16 + 16 * 4 + 10]) !=
if(index_max(network['high_order_10'].stateOutputNeurons[nbInputs + nbHidden:nbInputs + nbHidden+ nbOutputs]) !=
index_max(network['first_order'].stateOutputNeurons)):
err_one_network['high_order_10'] += 1

if(index_max(network['high_order_control'].stateOutputNeurons[16 * 16 + 16 * 4:16 * 16 + 16 * 4 + 10]) !=
if(index_max(network['high_order_control'].stateOutputNeurons[nbInputs + nbHidden:nbInputs + nbHidden+ nbOutputs]) !=
index_max(network['first_order_control'].stateOutputNeurons)):
err_one_network['high_order_control'] += 1

Expand All @@ -281,20 +289,19 @@ def newtask3(l):
network['high_order_control'].train(network['first_order_control'].stateHiddenNeurons,
entire_first_order2)

network['first_order_control'].train(examples.inputs[ex], examples.outputs[ex])
[network['perceptron'][i].train(examples.inputs[ex], examples.outputs[ex][i]) for i in range(10)]
for i in range(len(examples.outputs[ex])):
network['first_order'].outputNeurons[i].train(network['first_order'].stateHiddenNeurons,
examples.outputs[ex][i])
network['first_order'].train(examples.inputs[ex],
examples.outputs[ex])
network['first_order_control'].train(examples.inputs[ex],
examples.outputs[ex])

[network['perceptron'][i].train(examples.inputs[ex], examples.outputs[ex][i]) for i in range(nbOutputs)]


#add plot
for k in err_plot.keys() :
err_plot[k].append(err_one_network[k] / (nbTry * nbr_network))

print(epoch, " err : ", err_plot['first_order'][epoch])




print(examples.outputs[0])
Expand All @@ -305,6 +312,7 @@ def newtask3(l):
print(examples.outputs[0])


#learning
for epoch in range(1000):
err_one_network = {'first_order' : 0. ,
'high_order_10' : 0.,
Expand Down Expand Up @@ -334,8 +342,8 @@ def newtask3(l):

network['high_order_control'].calc_output(
network['first_order_control'].stateHiddenNeurons)

res = [network['perceptron'][i].calc_output(examples.inputs[ex]) for i in range(10)]
res = [network['perceptron'][i].calc_output(examples.inputs[ex]) for i in range(nbOutputs)]

if(index_max(res) != index_max(examples.outputs[ex])):
err_one_network['perceptron'] += 1
Expand All @@ -346,11 +354,11 @@ def newtask3(l):
if(index_max(network['first_order_control'].stateOutputNeurons) != index_max(examples.outputs[ex])):
err_one_network['first_order_control'] += 1

if(index_max(network['high_order_10'].stateOutputNeurons[16 * 16 + 16 * 4:16 * 16 + 16 * 4 + 10]) !=
if(index_max(network['high_order_10'].stateOutputNeurons[nbInputs + nbHidden:nbInputs + nbHidden+ nbOutputs]) !=
index_max(network['first_order'].stateOutputNeurons)):
err_one_network['high_order_10'] += 1

if(index_max(network['high_order_control'].stateOutputNeurons[16 * 16 + 16 * 4:16 * 16 + 16 * 4 + 10]) !=
if(index_max(network['high_order_control'].stateOutputNeurons[nbInputs + nbHidden:nbInputs + nbHidden+ nbOutputs]) !=
index_max(network['first_order_control'].stateOutputNeurons)):
err_one_network['high_order_control'] += 1

Expand All @@ -360,22 +368,22 @@ def newtask3(l):
network['high_order_control'].train(network['first_order_control'].stateHiddenNeurons,
entire_first_order2)

network['first_order_control'].train(examples.inputs[ex], examples.outputs[ex])
[network['perceptron'][i].train(examples.inputs[ex], examples.outputs[ex][i]) for i in range(10)]
for i in range(len(examples.outputs[ex])):
network['first_order'].outputNeurons[i].train(network['first_order'].stateHiddenNeurons,
examples.outputs[ex][i])
network['first_order'].train(examples.inputs[ex],
examples.outputs[ex])
network['first_order_control'].train(examples.inputs[ex],
examples.outputs[ex])

[network['perceptron'][i].train(examples.inputs[ex], examples.outputs[ex][i]) for i in range(nbOutputs)]


#add plot
for k in err_plot.keys() :
err_plot[k].append(err_one_network[k] / (nbTry * nbr_network))

print(epoch, " err : ", err_plot['first_order'][epoch])



display_interval = range(len(err_plot['first_order']))[::50]
#displays errors
plt.plot(display_interval, [err_plot['first_order'][i] for i in display_interval],
label="first-order network",
Expand Down

0 comments on commit 5c95bee

Please sign in to comment.