diff --git a/src/edu/stanford/nlp/sentiment/SentimentCostAndGradient.java b/src/edu/stanford/nlp/sentiment/SentimentCostAndGradient.java
index a6c1bf2db6..1732687470 100644
--- a/src/edu/stanford/nlp/sentiment/SentimentCostAndGradient.java
+++ b/src/edu/stanford/nlp/sentiment/SentimentCostAndGradient.java
@@ -2,7 +2,6 @@
 
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 import org.ejml.simple.SimpleMatrix;
 
@@ -22,7 +21,8 @@
 
 // TODO: get rid of the word Sentiment everywhere
 public class SentimentCostAndGradient extends AbstractCachingDiffFunction {
-  private static Redwood.RedwoodChannels log = Redwood.channels(SentimentCostAndGradient.class);
+  private static final Redwood.RedwoodChannels log = Redwood.channels(SentimentCostAndGradient.class);
+
   private final SentimentModel model;
   private final List<Tree> trainingBatch;
 
@@ -52,7 +52,7 @@ private static double sumError(Tree tree) {
   }
 
   /**
-   * Returns the index with the highest value in the predictions matrix.
+   * Returns the index with the highest value in the {@code predictions} matrix.
    * Indexed from 0.
    */
   private static int getPredictedClass(SimpleMatrix predictions) {
@@ -287,7 +287,7 @@ public void calculate(double[] theta) {
     derivative = NeuralUtils.paramsToVector(theta.length, derivatives.binaryTD.valueIterator(), derivatives.binaryCD.valueIterator(), SimpleTensor.iteratorSimpleMatrix(derivatives.binaryTensorTD.valueIterator()), derivatives.unaryCD.values().iterator(), derivatives.wordVectorD.values().iterator());
   }
 
-  static double scaleAndRegularize(TwoDimensionalMap<String, String, SimpleMatrix> derivatives,
+  private static double scaleAndRegularize(TwoDimensionalMap<String, String, SimpleMatrix> derivatives,
                                    TwoDimensionalMap<String, String, SimpleMatrix> currentMatrices,
                                    double scale, double regCost, boolean dropBiasColumn) {
     double cost = 0.0; // the regularization cost
@@ -305,9 +305,9 @@ static double scaleAndRegularize(TwoDimensionalMap<String, String, SimpleMatrix
     return cost;
   }
 
-  static double scaleAndRegularize(Map<String, SimpleMatrix> derivatives,
+  private static double scaleAndRegularize(Map<String, SimpleMatrix> derivatives,
                                    Map<String, SimpleMatrix> currentMatrices,
-                                   double scale, double regCost, 
+                                   double scale, double regCost,
                                    boolean activeMatricesOnly, boolean dropBiasColumn) {
     double cost = 0.0; // the regularization cost
     for (Map.Entry<String, SimpleMatrix> entry : currentMatrices.entrySet()) {
@@ -330,7 +330,7 @@ static double scaleAndRegularize(Map<String, SimpleMatrix> derivatives,
     return cost;
   }
 
-  static double scaleAndRegularizeTensor(TwoDimensionalMap<String, String, SimpleTensor> derivatives,
+  private static double scaleAndRegularizeTensor(TwoDimensionalMap<String, String, SimpleTensor> derivatives,
                                          TwoDimensionalMap<String, String, SimpleTensor> currentMatrices,
                                          double scale,
                                          double regCost) {
@@ -486,8 +486,8 @@ private static SimpleTensor getTensorGradient(SimpleMatrix deltaFull, SimpleMatr
    * useful annotation except when training.
    */
   public void forwardPropagateTree(Tree tree) {
-    SimpleMatrix nodeVector = null;
-    SimpleMatrix classification = null;
+    SimpleMatrix nodeVector; // initialized below or Exception thrown // = null;
+    SimpleMatrix classification; // initialized below or Exception thrown // = null;
     if (tree.isLeaf()) {
       // We do nothing for the leaves. The preterminals will