-
Notifications
You must be signed in to change notification settings - Fork 87
/
Builtins.scala
133 lines (126 loc) · 4 KB
/
Builtins.scala
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
package com.thoughtworks.deeplearning.plugins
/** A plugin that enables all other DeepLearning.scala built-in plugins.
*
* @example When creating a [[Builtins]] from [[com.thoughtworks.feature.Factory]],
*
* {{{
* import com.thoughtworks.feature.Factory
* val hyperparameters = Factory[plugins.Builtins].newInstance()
* }}}
*
* and `import` anything in [[implicits]],
*
* {{{
* import hyperparameters.implicits._
* }}}
*
* then all DeepLearning.scala built-in features should be enabled.
*
* <hr/>
*
* Creating weights:
*
* {{{
* import org.nd4j.linalg.factory.Nd4j
* import org.nd4j.linalg.api.ndarray.INDArray
* }}}
* {{{
* val numberOfInputFeatures = 8
* val numberOfOutputFeatures = 1
* val initialValueOfWeight: INDArray = Nd4j.rand(numberOfInputFeatures, numberOfOutputFeatures)
* val weight: hyperparameters.INDArrayWeight = hyperparameters.INDArrayWeight(initialValueOfWeight)
* }}}
*
* Creating neural network layers,
*
* {{{
* def fullyConnectedLayer(input: INDArray): hyperparameters.INDArrayLayer = {
* input dot weight
* }
* }}}
*
* or loss functions:
*
* {{{
* def hingeLoss(scores: hyperparameters.INDArrayLayer, label: INDArray): hyperparameters.DoubleLayer = {
* hyperparameters.max(0.0, 1.0 - label * scores).sum
* }
* }}}
*
* Training:
* {{{
* import scalaz.std.stream._
* import com.thoughtworks.future._
* import com.thoughtworks.each.Monadic._
* }}}
*
* {{{
* val batchSize = 4
* val numberOfIterations = 10
* val input = Nd4j.rand(batchSize, numberOfInputFeatures)
* val label = Nd4j.rand(batchSize, numberOfOutputFeatures)
* }}}
*
* {{{
* @monadic[Future]
* def train: Future[Stream[Double]] = {
* for (iteration <- (0 until numberOfIterations).toStream) yield {
* hingeLoss(fullyConnectedLayer(input), label).train.each
* }
* }
* }}}
*
* When the training is done,
  * the loss of the last iteration should be no more than the loss of the first iteration.
*
* {{{
* train.map { lossesByIteration =>
* lossesByIteration.last should be <= lossesByIteration.head
* }
* }}}
*
* @author 杨博 (Yang Bo)
*/
// Aggregates every DeepLearning.scala built-in plugin into one trait, so a single
// `Factory[Builtins].newInstance()` yields hyperparameters with all features enabled.
// NOTE(review): the order of these mixins determines trait linearization
// (initialization order and member override resolution) — do not reorder casually.
trait Builtins
    extends ImplicitsSingleton
    with Layers
    with Weights
    with Logging
    with Names
    with Operators
    with FloatTraining
    with FloatLiterals
    with FloatWeights
    with FloatLayers
    with CumulativeFloatLayers
    with DoubleTraining
    with DoubleLiterals
    with DoubleWeights
    with DoubleLayers
    with CumulativeDoubleLayers
    with INDArrayTraining
    with INDArrayLiterals
    with INDArrayWeights
    with INDArrayLayers
    with CumulativeINDArrayLayers {

  // Merges the `ImplicitsApi` members contributed by each mixed-in plugin, so that
  // one `import implicits._` brings every implicit conversion/operator into scope.
  // Each `super[Parent].ImplicitsApi` selects that specific parent's inner trait.
  // NOTE(review): `Logging`, `Names`, and the `Cumulative*Layers` plugins are not
  // listed here — presumably they declare no `ImplicitsApi` of their own; confirm
  // against those plugins' definitions.
  trait ImplicitsApi
      extends super[Layers].ImplicitsApi
      with super[Weights].ImplicitsApi
      with super[Operators].ImplicitsApi
      with super[FloatTraining].ImplicitsApi
      with super[FloatLiterals].ImplicitsApi
      with super[FloatLayers].ImplicitsApi
      with super[DoubleTraining].ImplicitsApi
      with super[DoubleLiterals].ImplicitsApi
      with super[DoubleLayers].ImplicitsApi
      with super[INDArrayTraining].ImplicitsApi
      with super[INDArrayLiterals].ImplicitsApi
      with super[INDArrayLayers].ImplicitsApi

  // Abstract type member refined by the concrete instance created via Factory;
  // the bound ties the runtime `implicits` object to the combined API above.
  type Implicits <: ImplicitsApi

  // Every Layer created by this plugin set carries both logging and naming
  // support. The self-type `this: Layer =>` ensures the API is only ever mixed
  // into the final Layer type.
  trait LayerApi extends super[Logging].LayerApi with super[Names].LayerApi { this: Layer =>
  }
  type Layer <: LayerApi

  // Likewise, every Weight gains logging and naming support.
  trait WeightApi extends super[Logging].WeightApi with super[Names].WeightApi { this: Weight =>
  }
  type Weight <: WeightApi
}