
revert previous "culture" fixes
daelsepara committed Oct 31, 2018
1 parent cc216e7 commit 1ee757b
Showing 4 changed files with 19 additions and 19 deletions.
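
Every hunk below is the same one-line substitution: expressions like (double)1 / 2, introduced by the earlier "culture" fixes, go back to plain literals like 0.5. The two spellings produce bit-identical constants, and C# numeric literals are parsed culture-invariantly at compile time, so locale settings cannot affect them. A quick standalone check, for illustration only:

using System;

class LiteralEquivalence
{
    static void Main()
    {
        Console.WriteLine((double)1 / 2 == 0.5);      // True: both are exactly 0.5
        Console.WriteLine((double)99 / 100 == 0.99);  // True: the division rounds to the same double as the literal
        Console.WriteLine((double)1 / 1000 == 0.001); // True, for the same reason
    }
}
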
12 changes: 6 additions & 6 deletions DeepLearnUI/ManagedCNN.cs
@@ -56,7 +56,7 @@ public void Rand(ManagedArray rand, Random random, int fan_in, int fan_out)
{
for (int x = 0; x < rand.Length(); x++)
{
- rand[x] = (random.NextDouble() - (double)1 / 2) * 2 * Math.Sqrt((double)6 / (fan_in + fan_out));
+ rand[x] = (random.NextDouble() - 0.5) * 2.0 * Math.Sqrt(6.0 / (fan_in + fan_out));
}
}
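
For context, the expression in this hunk is a Glorot/Xavier-style uniform initializer: each weight is drawn uniformly from (-limit, limit) with limit = sqrt(6 / (fan_in + fan_out)). A minimal standalone sketch, assuming only that ManagedArray behaves like a flat array of doubles:

using System;

static class GlorotUniform
{
    // Fill weights with samples from U(-limit, +limit), limit = sqrt(6 / (fanIn + fanOut)).
    public static void Fill(double[] weights, Random random, int fanIn, int fanOut)
    {
        double limit = Math.Sqrt(6.0 / (fanIn + fanOut));
        for (int x = 0; x < weights.Length; x++)
        {
            // NextDouble() is uniform on [0, 1); shift and scale onto (-limit, +limit).
            weights[x] = (random.NextDouble() - 0.5) * 2.0 * limit;
        }
    }
}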

@@ -267,7 +267,7 @@ public void BackPropagation(ManagedArray batch)
}

// Loss Function
- L = (double)1 / 2 * ManagedMatrix.SquareSum(OutputError) / batch.x;
+ L = 0.5 * ManagedMatrix.SquareSum(OutputError) / batch.x;

ManagedOps.Free(WeightsTransposed, FeatureVectorDelta);
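
The loss here is half the summed squared output error divided by the batch size (batch.x appears to hold the sample count; that reading is an assumption). In scalar form, a sketch:

// L = (1/2) * sum_i(e_i^2) / batchSize for a flattened error vector e.
static double HalfMeanSquaredError(double[] error, int batchSize)
{
    double sum = 0.0;
    for (int i = 0; i < error.Length; i++)
    {
        sum += error[i] * error[i];
    }
    return 0.5 * sum / batchSize;
}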

@@ -324,7 +324,7 @@ public void BackPropagation(ManagedArray batch)
var Activation = new ManagedArray(xx, yy, false);
var Delta = new ManagedArray(xx, yy, false);

- var Scale = ((double)1 / (Layers[l + 1].Scale * Layers[l + 1].Scale));
+ var Scale = (1.0 / (Layers[l + 1].Scale * Layers[l + 1].Scale));

for (int j = 0; j < Layers[l].Activation.i; j++)
{
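
Scale = 1/s² is the backpropagation factor for a subsampling (mean-pooling) layer with an s-by-s window: the delta of each pooled unit is spread evenly over the s² positions that were averaged in the forward pass. A sketch of that expansion, under the assumption that Layers[l + 1].Scale is the window size:

// Expand a pooled delta map back to full resolution, giving each of the
// s*s source positions an equal 1/(s*s) share of the pooled delta.
static double[,] ExpandPooledDelta(double[,] pooled, int s)
{
    int rows = pooled.GetLength(0), cols = pooled.GetLength(1);
    var full = new double[rows * s, cols * s];
    double scale = 1.0 / (s * s);
    for (int i = 0; i < full.GetLength(0); i++)
    {
        for (int j = 0; j < full.GetLength(1); j++)
        {
            full[i, j] = pooled[i / s, j / s] * scale;
        }
    }
    return full;
}
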
@@ -405,7 +405,7 @@ public void BackPropagation(ManagedArray batch)
ManagedOps.Copy4D3D(atemp, Layers[l - 1].Activation, i);
ManagedMatrix.FlipAll(ftemp, atemp);
ManagedConvolution.Valid(ftemp, dtemp, FeatureMapDelta);
- ManagedMatrix.Multiply(FeatureMapDelta, (double)1 / Layers[n - 1].Activation.z);
+ ManagedMatrix.Multiply(FeatureMapDelta, 1.0 / Layers[n - 1].Activation.z);

ManagedOps.Copy2D4DIJ(Layers[l].DeltaFeatureMap, FeatureMapDelta, i, j);
}
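
This hunk accumulates the kernel gradient: the previous layer's activations are flipped in both dimensions (ManagedMatrix.FlipAll), valid-convolved with the delta map, and the result is averaged over the batch, assuming Activation.z counts the samples. For reference, a minimal "valid" 2-D convolution; this is the sliding inner product, with the flip done by the caller as above:

// "Valid" convolution: no padding, output size (aRows - kRows + 1) x (aCols - kCols + 1).
static double[,] ValidConvolution(double[,] a, double[,] k)
{
    int aRows = a.GetLength(0), aCols = a.GetLength(1);
    int kRows = k.GetLength(0), kCols = k.GetLength(1);
    var result = new double[aRows - kRows + 1, aCols - kCols + 1];
    for (int i = 0; i < result.GetLength(0); i++)
        for (int j = 0; j < result.GetLength(1); j++)
            for (int p = 0; p < kRows; p++)
                for (int q = 0; q < kCols; q++)
                    result[i, j] += a[i + p, j + q] * k[p, q];
    return result;
}
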
@@ ... @@
BiasDelta = new ManagedArray(Bias, false);

ManagedMatrix.Multiply(WeightsDelta, OutputDelta, FeatureVectorTransposed);
- ManagedMatrix.Multiply(WeightsDelta, (double)1 / Layers[n - 1].Activation.z);
+ ManagedMatrix.Multiply(WeightsDelta, 1.0 / Layers[n - 1].Activation.z);
ManagedMatrix.Mean(BiasDelta, OutputDelta, 0);

ManagedOps.Free(FeatureVectorTransposed);
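
For the fully connected output layer, the weight gradient is the output delta times the transposed feature vector, averaged over the batch, and the bias gradient is the mean delta. In index form, a sketch; the 1/m factor matches the 1.0 / Layers[n - 1].Activation.z above if .z is the sample count, which is an assumption:

// dW[k, j] = (1/m) * sum_s delta[k, s] * features[j, s]
// dB[k]    = (1/m) * sum_s delta[k, s]
static void OutputLayerGradients(double[,] delta, double[,] features,
                                 double[,] dW, double[] dB, int m)
{
    for (int k = 0; k < dW.GetLength(0); k++)
    {
        for (int j = 0; j < dW.GetLength(1); j++)
        {
            double sum = 0.0;
            for (int s = 0; s < m; s++)
            {
                sum += delta[k, s] * features[j, s];
            }
            dW[k, j] = sum / m;
        }
        double biasSum = 0.0;
        for (int s = 0; s < m; s++)
        {
            biasSum += delta[k, s];
        }
        dB[k] = biasSum / m;
    }
}
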
@@ -552,7 +552,7 @@ public void Train(ManagedArray input, ManagedArray output, ConvolutionalNeuralNe
rL.Add(L);
}

- rLVal = (double)99 / 100 * rL[rL.Count - 1] + (double)1 / 100 * L;
+ rLVal = 0.99 * rL[rL.Count - 1] + 0.01 * L;

rL.Add(rLVal);
}
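
The running loss rLVal is an exponential moving average: it keeps 99% of the previous smoothed value and mixes in 1% of the current batch loss, which damps batch-to-batch noise in the reported training curve. Generic form:

// EMA update with smoothing factor alpha; the code above uses alpha = 0.01.
static double SmoothedLoss(double previous, double current, double alpha = 0.01)
{
    return (1.0 - alpha) * previous + alpha * current;
}
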
8 changes: 4 additions & 4 deletions DeepLearnUI/ManagedNN.cs
@@ -122,8 +122,8 @@ public void BackPropagation(ManagedArray training)
Cost /= training.y;
L2 /= training.y;

- ManagedMatrix.Multiply(DeltaWji, (double)1 / training.y);
- ManagedMatrix.Multiply(DeltaWkj, (double)1 / training.y);
+ ManagedMatrix.Multiply(DeltaWji, 1.0 / training.y);
+ ManagedMatrix.Multiply(DeltaWkj, 1.0 / training.y);

// cleanup
ManagedOps.Free(D2, D3, DZ2, InputBias);
@@ ... @@
{
for (int x = 0; x < rand.Length(); x++)
{
- rand[x] = (random.NextDouble() - (double)1 / 2) * 2;
+ rand[x] = (random.NextDouble() - 0.5) * 2.0;
}
}
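
This simpler initializer maps NextDouble()'s [0, 1) range onto [-1, 1). The general interval version, for comparison (names illustrative):

// Uniform sample in [a, b); (random.NextDouble() - 0.5) * 2.0 is the case a = -1, b = 1.
static double NextUniform(Random random, double a, double b)
{
    return a + (b - a) * random.NextDouble();
}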

@@ -215,7 +215,7 @@ public ManagedArray Predict(ManagedArray test, NeuralNetworkOptions opts)
return prediction;
}

- public ManagedIntList Classify(ManagedArray test, NeuralNetworkOptions opts, double threshold = (double)1 / 2)
+ public ManagedIntList Classify(ManagedArray test, NeuralNetworkOptions opts, double threshold = 0.5)
{
Forward(test);
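
Classify runs a forward pass and then thresholds each output activation, with 0.5 as the natural default for sigmoid outputs (the midpoint of their range). A sketch of the thresholding step, assuming one activation per test item:

// Map sigmoid activations to 0/1 labels at the given decision threshold.
static int[] ThresholdLabels(double[] activations, double threshold = 0.5)
{
    var labels = new int[activations.Length];
    for (int i = 0; i < activations.Length; i++)
    {
        labels[i] = activations[i] > threshold ? 1 : 0;
    }
    return labels;
}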

2 changes: 1 addition & 1 deletion DeepLearnUI/NeuralNetworkOptions.cs
@@ -29,7 +29,7 @@ public NeuralNetworkOptions()
Nodes = 16; // Hidden layer nodes (j)
Items = 50; // number of input items
Categories = 2; // number of output categories (k)
- Tolerance = (double)1 / 1000;
+ Tolerance = 0.001;
}
}
}
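
Tolerance = 0.001 is the convergence threshold. Assuming it is compared against the training cost (an assumption; the stopping logic is not shown in this diff), typical usage would look like:

// Hypothetical training loop; TrainOneEpoch is a placeholder, not part of the library.
var opts = new NeuralNetworkOptions();            // Tolerance defaults to 0.001
double cost = double.MaxValue;
for (int epoch = 0; epoch < 1000 && cost > opts.Tolerance; epoch++)
{
    cost = TrainOneEpoch();
}
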
16 changes: 8 additions & 8 deletions DeepLearnUI/Optimize.cs
@@ -70,11 +70,11 @@ public FuncOutput(double error, double[] X)
public class Optimize
{
// RHO and SIG are the constants in the Wolfe-Powell conditions
- double RHO = (double)1 / 100;
- double SIG = (double)1 / 2;
+ double RHO = 0.01;
+ double SIG = 0.5;

// don't reevaluate within 0.1 of the limit of the current bracket
- double INT = (double)1 / 10;
+ double INT = 0.1;

// extrapolate maximum 3 times the current bracket
double EXT = 3;
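
RHO and SIG parameterize the Wolfe-Powell line-search conditions: RHO sets the slope of the sufficient-decrease (Armijo) line, and SIG bounds how large the directional slope may remain at an accepted point. As predicates (a sketch; f0 and d0 are the value and directional derivative at the start, f1 and d1 at step length a, with d0 < 0 along a descent direction):

// Sufficient decrease: f(a) must lie below the line f0 + a * rho * d0.
static bool SufficientDecrease(double f1, double f0, double d0, double a, double rho)
{
    return f1 <= f0 + a * rho * d0;
}

// Curvature: the slope magnitude at a must shrink to at most sig * |d0|.
static bool CurvatureCondition(double d1, double d0, double sig)
{
    return Math.Abs(d1) <= -sig * d0;
}
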
@@ -242,7 +242,7 @@ public bool Step(Func<double[], FuncOutput> F, double[] X)
if (f2 > f1)
{
// quadratic fit
- z2 = z3 - (((double)1 / 2 * d3 * z3 * z3) / (d3 * z3 + f2 - f3));
+ z2 = z3 - ((0.5 * d3 * z3 * z3) / (d3 * z3 + f2 - f3));
}
else
{
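
The quadratic fit has a direct derivation: take q(t) = a*t^2 + b*t + c with q(0) = f2, q(z3) = f3, and q'(z3) = d3. Then c = f2, a = (d3*z3 + f2 - f3) / z3^2, and the stationary point t* = z3 - d3/(2a) simplifies to exactly the expression above:

// Stationary point of the quadratic through (0, f2) and (z3, f3) with slope d3 at z3.
static double QuadraticFitStep(double f2, double f3, double d3, double z3)
{
    return z3 - (0.5 * d3 * z3 * z3) / (d3 * z3 + f2 - f3);
}
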
@@ -321,7 +321,7 @@ public bool Step(Func<double[], FuncOutput> F, double[] X)
if (double.IsNaN(z21) || double.IsInfinity(z21) || z21 < 0)
{
// if we have no upper limit
- if (limit < -(double)1 / 2)
+ if (limit < -0.5)
{
// then extrapolate the maximum amount
z21 = z1 * (EXT - 1);
@@ ... @@
z21 = (limit - z1) / 2;
}
}
- else if (limit > -(double)1 / 2 && (z21 + z1 > limit))
+ else if (limit > -0.5 && (z21 + z1 > limit))
{
// extrapolation beyond limit?

// set to extrapolation limit
z21 = (limit - z1) / 2;
}
- else if (limit < -(double)1 / 2 && (z21 + z1 > z1 * EXT))
+ else if (limit < -0.5 && (z21 + z1 > z1 * EXT))
{
z21 = z1 * (EXT - 1);
}
@@ ... @@
// too close to limit?
z21 = -z3 * INT;
}
- else if ((limit > -(double)1 / 2) && (z21 < (limit - z1) * (1 - INT)))
+ else if ((limit > -0.5) && (z21 < (limit - z1) * (1 - INT)))
{
z21 = (limit - z1) * (1 - INT);
}
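
The recurring comparisons against -0.5 treat limit as a sentinel: a negative value (conventionally -1) means no upper bracket limit has been established, and testing limit < -0.5 avoids an exact floating-point equality check. The convention, as assumed from these branches:

double limit = -1.0;                // sentinel: no upper bound on the step yet
bool hasUpperLimit = limit > -0.5;  // robust stand-in for limit != -1.0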
