/
convolution.go
128 lines (122 loc) · 3.1 KB
/
convolution.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
package layers
import (
"github.com/lon9/exmat"
mymat "github.com/lon9/mat"
"gonum.org/v1/gonum/mat"
)
// ConvolutionLayer is layer of Convolution.
//
// It stores the kernel weights and hyperparameters for a 2-D
// convolution over a [channel][row][col] input volume.
type ConvolutionLayer struct {
*BaseLayer // embedded common layer state (constructed via NewBaseLayer with name and type)
NInput uint32 // number of input channels
NOutput uint32 // number of output channels (one kernel per output)
KernelSize int // side length of the square kernel
Stride int // step between successive kernel applications
Padding int // zero-padding width applied to each input channel before convolving
Weights [][][][]float32 // kernel weights, indexed [output][input][row][col]
Bias []float32 // per-output-channel bias; Forward indexes Bias[i] for each of the NOutput channels when BiasTerm is true
BiasTerm bool // whether a bias is added to each output plane after convolution
}
// NewConvolutionLayer is constructor.
//
// It allocates the weight tensor, shaped [nOutput][nInput][kernelSize][kernelSize],
// and — when biasTerm is true — a bias vector with one entry per output
// channel. All values start at zero and are expected to be filled in by
// the caller (e.g. a model loader) before Forward is used.
func NewConvolutionLayer(name, t string, nInput, nOutput uint32, kernelSize, stride, padding int, biasTerm bool) *ConvolutionLayer {
	w := make([][][][]float32, nOutput)
	for i := range w {
		w[i] = make([][][]float32, nInput)
		for j := range w[i] {
			w[i][j] = make([][]float32, kernelSize)
			for k := range w[i][j] {
				w[i][j][k] = make([]float32, kernelSize)
			}
		}
	}
	// Allocate Bias up front when the layer uses a bias term: Forward
	// indexes conv.Bias[i] for every output channel, which would panic
	// on a nil slice if the caller never assigned one.
	var bias []float32
	if biasTerm {
		bias = make([]float32, nOutput)
	}
	return &ConvolutionLayer{
		BaseLayer:  NewBaseLayer(name, t),
		NInput:     nInput,
		NOutput:    nOutput,
		KernelSize: kernelSize,
		Stride:     stride,
		Padding:    padding,
		Weights:    w,
		Bias:       bias,
		BiasTerm:   biasTerm,
	}
}
// Forward forwards a step.
//
// It convolves input (a [channel][row][col] volume) with the layer's
// kernels and returns one output plane per output channel. The work is
// done as an im2col transform followed by a single matrix multiply.
// NOTE(review): when BiasTerm is true this indexes conv.Bias[idx] for
// every output channel — Bias must already hold NOutput values or this
// panics; confirm the loader populates it.
func (conv *ConvolutionLayer) Forward(input [][][]float32) ([][][]float32, error) {
// Zero-pad each input channel in place, one goroutine per channel.
// Each goroutine writes only its own input[i], so the writes are disjoint.
if conv.Padding > 0 {
doneCh := make(chan bool, len(input))
for i := range input {
go func(i int, doneCh chan bool) {
in := ConvertMatrix(input[i])
inExMat := exmat.NewExMatFromDense(in)
input[i] = ConvertMat64(inExMat.ZeroPadding(conv.Padding))
doneCh <- true
}(i, doneCh)
}
// Wait for all padding goroutines before reading input again.
for i := 0; i < len(input); i++ {
<-doneCh
}
close(doneCh)
}
// Unroll the (now padded) input into a patch matrix: one row per
// kernel position, one column per patch element.
in := ConvertMatrix(Im2Col(input, conv.KernelSize, conv.Stride))
// Flatten each kernel into a single row, concurrently (disjoint writes
// to kernels[i]). Presumably Im2Col over a kernel-sized volume yields
// exactly one row, hence the [0] — verify against Im2Col's contract.
kernels := make([][]float32, conv.NOutput)
doneCh := make(chan bool, conv.NOutput)
for i := 0; i < int(conv.NOutput); i++ {
go func(i int, doneCh chan bool) {
kernels[i] = Im2Col(conv.Weights[i], conv.KernelSize, conv.Stride)[0]
doneCh <- true
}(i, doneCh)
}
for i := 0; i < int(conv.NOutput); i++ {
<-doneCh
}
close(doneCh)
kernelMatrix := ConvertMatrix(kernels)
// One GEMM performs the whole convolution: (positions x patch) * (patch x NOutput).
var out mat.Dense
out.Mul(in, kernelMatrix.T())
output := make([][][]float32, conv.NOutput)
// Output plane dimensions, derived from the padded input size.
rows := (len(input[0])-conv.KernelSize)/conv.Stride + 1
cols := (len(input[0][0])-conv.KernelSize)/conv.Stride + 1
// After transposing, each row i of outTransposed is the flattened
// output plane for channel i; reshape each to rows x cols concurrently
// (disjoint writes to output[i]).
outTransposed := out.T()
r, c := outTransposed.Dims()
errCh := make(chan error, r)
for i := 0; i < r; i++ {
go func(i int, errCh chan error) {
part := make([][]float32, 1)
part[0] = make([]float32, c)
for j := 0; j < c; j++ {
part[0][j] = float32(outTransposed.At(i, j))
}
res, err := mymat.NewMatrix(part).Reshape(uint(rows), uint(cols))
if err != nil {
errCh <- err
return
}
output[i] = res.M
errCh <- nil
}(i, errCh)
}
// Drain every goroutine's result; first reshape error aborts the pass.
// NOTE(review): on early return the remaining goroutines still finish
// into the buffered channel, so none block — but their work is wasted.
for i := 0; i < r; i++ {
if err := <-errCh; err != nil {
return nil, err
}
}
close(errCh)
// Add the per-channel bias to every element of its output plane.
if conv.BiasTerm {
doneCh := make(chan bool, len(output))
for i := range output {
go func(idx int) {
m := ConvertMatrix(output[idx])
var res mat.Dense
res.Apply(func(i, j int, v float64) float64 {
return v + float64(conv.Bias[idx])
}, m)
output[idx] = ConvertMat64(&res)
doneCh <- true
}(i)
}
for range output {
<-doneCh
}
close(doneCh)
}
return output, nil
}