/*-
*
* * Copyright 2015 Skymind,Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.deeplearning4j.nn.layers;

import org.deeplearning4j.eval.Evaluation;
import org.deeplearning4j.nn.api.MaskState;
import org.deeplearning4j.nn.api.layers.IOutputLayer;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.gradient.DefaultGradient;
import org.deeplearning4j.nn.gradient.Gradient;
import org.deeplearning4j.nn.params.DefaultParamInitializer;
import org.deeplearning4j.optimize.Solver;
import org.nd4j.base.Preconditions;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.api.DataSet;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.ILossFunction;
import org.nd4j.linalg.primitives.Pair;
import org.nd4j.linalg.util.FeatureUtil;
import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr;
import org.deeplearning4j.nn.workspace.ArrayType;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
* Output layer that computes a loss with respect to labels, supporting
* different objectives (loss functions) for different use cases.
* This includes classification as well as prediction.
* @author Adam Gibson
*
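* <p>A minimal, illustrative configuration sketch using the concrete
* {@code OutputLayer} subclass (layer sizes and the loss/activation choices
* here are placeholder assumptions, not recommendations):</p>
* <pre>{@code
* MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
*     .list()
*     .layer(0, new DenseLayer.Builder().nIn(784).nOut(100)
*         .activation(Activation.RELU).build())
*     .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
*         .nIn(100).nOut(10).activation(Activation.SOFTMAX).build())
*     .build();
* MultiLayerNetwork net = new MultiLayerNetwork(conf);
* net.init();
* }</pre>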
*/
public abstract class BaseOutputLayer<LayerConfT extends org.deeplearning4j.nn.conf.layers.BaseOutputLayer>
extends BaseLayer<LayerConfT> implements Serializable, IOutputLayer {
//current input and label matrices
protected INDArray labels;
private transient Solver solver;
private double fullNetworkL1;
private double fullNetworkL2;
protected INDArray inputMaskArray;
protected MaskState inputMaskArrayState;
public BaseOutputLayer(NeuralNetConfiguration conf) {
super(conf);
}
public BaseOutputLayer(NeuralNetConfiguration conf, INDArray input) {
super(conf, input);
}
/** Compute score after labels and input have been set.
* @param fullNetworkL1 L1 regularization term for the entire network
* @param fullNetworkL2 L2 regularization term for the entire network
* @param training whether score should be calculated at train or test time (this affects things like application of
* dropout, etc)
* @return score (loss function)
*/
@Override
public double computeScore(double fullNetworkL1, double fullNetworkL2, boolean training, LayerWorkspaceMgr workspaceMgr) {
if (input == null || labels == null)
throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
this.fullNetworkL1 = fullNetworkL1;
this.fullNetworkL2 = fullNetworkL2;
INDArray preOut = preOutput2d(training, workspaceMgr);
ILossFunction lossFunction = layerConf().getLossFn();
double score = lossFunction.computeScore(getLabels2d(workspaceMgr, ArrayType.FF_WORKING_MEM), preOut,
layerConf().getActivationFn(), maskArray, false);
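//Add the network regularization penalties, then average: score = (loss + l1 + l2) / miniBatchSize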
score += fullNetworkL1 + fullNetworkL2;
score /= getInputMiniBatchSize();
this.score = score;
return score;
}
/**Compute the score for each example individually, after labels and input have been set.
*
* @param fullNetworkL1 L1 regularization term for the entire network (or, 0.0 to not include regularization)
* @param fullNetworkL2 L2 regularization term for the entire network (or, 0.0 to not include regularization)
* @return A column INDArray of shape [numExamples,1], where entry i is the score of the ith example
*/
@Override
public INDArray computeScoreForExamples(double fullNetworkL1, double fullNetworkL2, LayerWorkspaceMgr workspaceMgr) {
if (input == null || labels == null)
throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
INDArray preOut = preOutput2d(false, workspaceMgr);
ILossFunction lossFunction = layerConf().getLossFn();
INDArray scoreArray =
lossFunction.computeScoreArray(getLabels2d(workspaceMgr, ArrayType.FF_WORKING_MEM),
preOut, layerConf().getActivationFn(), maskArray);
double l1l2 = fullNetworkL1 + fullNetworkL2;
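//The regularization penalty is a network-wide scalar, added uniformly to every example's score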
if (l1l2 != 0.0) {
scoreArray.addi(l1l2);
}
return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, scoreArray);
}
@Override
public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) {
if (input == null || labels == null)
return;
INDArray preOut = preOutput2d(true, workspaceMgr);
Pair<Gradient, INDArray> pair = getGradientsAndDelta(preOut, workspaceMgr);
this.gradient = pair.getFirst();
score = computeScore(fullNetworkL1, fullNetworkL2, true, workspaceMgr);
}
@Override
protected void setScoreWithZ(INDArray z) {
throw new RuntimeException("Not supported - " + layerId());
}
@Override
public Pair<Gradient, Double> gradientAndScore() {
return new Pair<>(gradient(), score());
}
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
assertInputSet(true);
Pair<Gradient, INDArray> pair = getGradientsAndDelta(preOutput2d(true, workspaceMgr), workspaceMgr); //Returns Gradient and delta^(this), not Gradient and epsilon^(this-1)
INDArray delta = pair.getSecond();
INDArray w = getParamWithNoise(DefaultParamInitializer.WEIGHT_KEY, true, workspaceMgr);
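//Error signal for the layer below: epsilon^(l-1) = delta * W^T, computed as (W * delta^T)^T to fill the 'f' ordered buffer in place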
INDArray epsilonNext = workspaceMgr.createUninitialized(ArrayType.ACTIVATION_GRAD, new int[]{w.size(0), delta.size(0)}, 'f');
epsilonNext = w.mmuli(delta.transpose(), epsilonNext).transpose();
//Normally we would clear weightNoiseParams here - but we want to reuse them for forward + backward + score
// So this is instead done in MultiLayerNetwork/CompGraph backprop methods
return new Pair<>(pair.getFirst(), epsilonNext);
}
/**
* Gets the gradient from one training iteration
* @return the gradient (bias and weight matrix)
*/
@Override
public Gradient gradient() {
return gradient;
}
/** Returns the pair {Gradient, Delta} given preOut */
private Pair<Gradient, INDArray> getGradientsAndDelta(INDArray preOut, LayerWorkspaceMgr workspaceMgr) {
ILossFunction lossFunction = layerConf().getLossFn();
INDArray labels2d = getLabels2d(workspaceMgr, ArrayType.BP_WORKING_MEM);
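//Delta: gradient of the loss with respect to the pre-activation output (loss function and activation function combined)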
INDArray delta = lossFunction.computeGradient(labels2d, preOut, layerConf().getActivationFn(), maskArray);
Gradient gradient = new DefaultGradient();
INDArray weightGradView = gradientViews.get(DefaultParamInitializer.WEIGHT_KEY);
Nd4j.gemm(input, delta, weightGradView, true, false, 1.0, 0.0); //Equivalent to: weightGradView.assign(input.transpose().mmul(delta));
gradient.gradientForVariable().put(DefaultParamInitializer.WEIGHT_KEY, weightGradView);
if(hasBias()){
INDArray biasGradView = gradientViews.get(DefaultParamInitializer.BIAS_KEY);
delta.sum(biasGradView, 0); //biasGradView is initialized/zeroed first in sum op
gradient.gradientForVariable().put(DefaultParamInitializer.BIAS_KEY, biasGradView);
}
delta = workspaceMgr.leverageTo(ArrayType.ACTIVATION_GRAD, delta);
return new Pair<>(gradient, delta);
}
@Override
public INDArray activate(INDArray input, boolean training, LayerWorkspaceMgr workspaceMgr) {
setInput(input, workspaceMgr);
return activate(training, workspaceMgr);
}
/**
* Sets the input and labels, and returns the F1 score
* for the predictions with respect to the true labels
*
* @param data the data to score
* @return the score for the given input,label pairs
*/
@Override
public double f1Score(DataSet data) {
return f1Score(data.getFeatures(), data.getLabels());
}
/**
* Returns the f1 score for the given examples.
* Think of this as an accuracy-like measure:
* the higher the score, the more predictions were correct.
* This is on a scale from 0 to 1.
*
* @param examples the examples to classify (one example in each row)
* @param labels the true labels
* @return the F1 score for the given examples and labels
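*
* <p>Illustrative usage, assuming hypothetical {@code outputLayer},
* {@code testFeatures} and {@code testLabels} variables:</p>
* <pre>{@code
* double f1 = outputLayer.f1Score(testFeatures, testLabels);
* }</pre>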
*/
@Override
public double f1Score(INDArray examples, INDArray labels) {
Evaluation eval = new Evaluation();
eval.eval(labels, labelProbabilities(examples));
return eval.f1();
}
/**
* Returns the number of possible labels
*
* @return the number of possible labels for this classifier
*/
@Override
public int numLabels() {
return labels.size(1);
}
@Override
public void fit(DataSetIterator iter) {
while (iter.hasNext())
fit(iter.next());
}
/**
* Returns the predicted class index for each example in the dataset
* @param input the matrix of examples to predict (one example per row)
* @return the predicted class index for each example
*/
@Override
public int[] predict(INDArray input) {
INDArray output = activate(input, false, LayerWorkspaceMgr.noWorkspacesImmutable());
int[] ret = new int[input.rows()];
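//Predicted class for each example (row) = index of the maximum output activation (argmax)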
for (int i = 0; i < ret.length; i++)
ret[i] = Nd4j.getBlasWrapper().iamax(output.getRow(i));
return ret;
}
/**
* Return predicted label names
*
* @param dataSet the dataset to predict label names for
* @return the predicted labels for the dataSet
*/
@Override
public List<String> predict(DataSet dataSet) {
int[] intRet = predict(dataSet.getFeatures());
List<String> ret = new ArrayList<>();
for (int classIdx : intRet) {
ret.add(dataSet.getLabelName(classIdx));
}
return ret;
}
/**
* Returns the probabilities for each label
* for each example row wise
*
* @param examples the examples to classify (one example in each row)
* @return the likelihoods of each example and each label
*/
@Override
public INDArray labelProbabilities(INDArray examples) {
return activate(examples, false, LayerWorkspaceMgr.noWorkspacesImmutable());
}
/**
* Fit the model
*
* @param input the examples to classify (one example in each row)
* @param labels the example labels(a binary outcome matrix)
*/
@Override
public void fit(INDArray input, INDArray labels) {
throw new UnsupportedOperationException("Not supported");
}
/**
* Fit the model
*
* @param data the data to train on
*/
@Override
public void fit(DataSet data) {
fit(data.getFeatures(), data.getLabels());
}
/**
* Fit the model
*
* @param examples the examples to classify (one example in each row)
* @param labels the labels for each example (the number of labels must match the number of examples)
*/
@Override
public void fit(INDArray examples, int[] labels) {
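//Convert the integer class indices to a one-hot outcome matrix before fitting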
INDArray outcomeMatrix = FeatureUtil.toOutcomeMatrix(labels, numLabels());
fit(examples, outcomeMatrix);
}
@Override
public void clear() {
super.clear();
labels = null;
solver = null;
inputMaskArrayState = null;
inputMaskArray = null;
fullNetworkL1 = 0.0;
fullNetworkL2 = 0.0;
}
/**
* Fit the model to the given data
*
* @param data the data to fit the model to
*/
@Override
public void fit(INDArray data, LayerWorkspaceMgr workspaceMgr) {
//no-op
}
@Override
public INDArray getLabels() {
return labels;
}
public void setLabels(INDArray labels) {
this.labels = labels;
}
protected INDArray preOutput2d(boolean training, LayerWorkspaceMgr workspaceMgr) {
return preOutput(training, workspaceMgr);
}
@Override
protected void applyMask(INDArray to) {
//For output layers: can be either per-example masking, or per-output masking
if (maskArray.isColumnVector()) {
to.muliColumnVector(maskArray);
} else if (Arrays.equals(to.shape(), maskArray.shape())) {
to.muli(maskArray);
} else {
throw new IllegalStateException("Invalid mask array: per-example masking should be a column vector, "
+ "per output masking arrays should be the same shape as the output/labels arrays. Mask shape: "
+ Arrays.toString(maskArray.shape()) + ", output shape: " + Arrays.toString(to.shape())
+ layerId());
}
}
protected INDArray getLabels2d(LayerWorkspaceMgr workspaceMgr, ArrayType arrayType) {
Preconditions.checkArgument(labels != null, "Labels are null");
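//Labels with rank > 2 (e.g. time series) are flattened to 2d here; subclasses such as RnnOutputLayer override this as required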
if (labels.rank() > 2) {
return workspaceMgr.leverageTo(arrayType, labels.reshape(labels.size(2), labels.size(1)));
}
return labels;
}
@Override
public boolean isPretrainLayer() {
return false;
}
@Override
public boolean hasBias() {
return layerConf().hasBias();
}
}