-
Notifications
You must be signed in to change notification settings - Fork 1
/
Neuron.java
107 lines (88 loc) · 2.92 KB
/
Neuron.java
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
//javaNN.java
import java.lang.Math;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Map;
class Neuron{
private double output;
private double output_prime;
public double delta;
private ArrayList<Neuron> inputNeurons = new ArrayList< Neuron > ( );
private ArrayList<Neuron> outputNeurons = new ArrayList< Neuron > ( );
private Hashtable<Neuron, Double> outputWeights = new Hashtable<Neuron, Double>(); // Hashtable containing outgoing-weights keyed by their respective neuron
private double bias; //unimplemented
public Neuron(){
// bias = Math.random()/14.0 - 1.0/28.0;
bias = Math.random() * .01 - .005;
}
public void addOut(Neuron[] outNeurons, Double new_weight){ //holds out-weights
for (Neuron connectNeuron : outNeurons){
if ( connectNeuron == null ){
System.out.println("null");
}
else{
Double weight = Math.random() * .01 - .005; //change weighths here
outputNeurons.add(connectNeuron);
outputWeights.put(connectNeuron, weight);
}
}
}
public void addIn(Neuron[] inNeurons){ //holds no weights
for (Neuron connectNeuron : inNeurons){
inputNeurons.add(connectNeuron);
}
}
public void fire(){
double collectSum = 0;
for (Neuron element : inputNeurons){
collectSum += ( element.getOutput() * element.getWeight(this) );
}
collectSum += bias; // bias always zero currently
output = sigmoid(collectSum);
output_prime = sigmoidPrime(collectSum);
}
public void adjustWeights(double learn_rate){
for (Neuron neuron : outputNeurons){
double calcWeightGrad = neuron.getDelta() * output;
Double currentWeight = outputWeights.get(neuron);
Double nextWeight = currentWeight - (learn_rate * calcWeightGrad);
outputWeights.put(neuron, nextWeight);
}
}
public void adjustBias(double learn_rate){
bias -= delta;
}
public void backpropagate(){ // calculate all deltas
// add errors from layer ahead
double cumDelta = 0;
for (Neuron upperNeuron : outputNeurons){
cumDelta += this.getWeight(upperNeuron) * upperNeuron.getDelta();
}
this.delta = cumDelta * output_prime; //* output_prime; //cum weighted delta times output_prime equals delta
}
public void backpropagate(double target){ // delta given by test example
// add errors from layer ahead
double noGoodName = output - target; //derivative of cost when C = 1/2 (y - out)^2
this.delta = noGoodName * output_prime;
}
public double getOutput(){
return output;
}
public void setOutput(double x){
output = x;
}
public double getOutput_prime(){
return output_prime;
}
public Double getWeight(Neuron upperNeuron){
return outputWeights.get(upperNeuron);
}
public double getDelta(){
return delta;
}
public static double sigmoid(double x){
return 1 / ( 1 + Math.exp( - x ) ); // let me know if this spacing makes you cringe... it depends on what screen I use.
}
public static double sigmoidPrime(double x){
return sigmoid(x) * ( 1 - sigmoid(x) );
}
}