-
Notifications
You must be signed in to change notification settings - Fork 43
/
other.go
151 lines (124 loc) · 3.2 KB
/
other.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
package nn
import (
"github.com/sugarme/gotch/ts"
)
// Dropout:
// ========

// Dropout represents a neural network dropout layer.
//
// The stored probability is forwarded verbatim to ts.MustDropout in
// ForwardT; dropout is only applied when the train flag is true.
type Dropout struct {
	// dropoutProb is the drop probability handed to ts.MustDropout.
	dropoutProb float64
}
// NewDropout creates a new Dropout layer with drop probability p.
func NewDropout(p float64) *Dropout {
	d := new(Dropout)
	d.dropoutProb = p
	return d
}
// ForwardT implements ModuleT for the Dropout layer.
//
// The train flag is passed through to ts.MustDropout, which decides
// whether dropout is actually applied.
func (d *Dropout) ForwardT(input *ts.Tensor, train bool) (retVal *ts.Tensor) {
	retVal = ts.MustDropout(input, d.dropoutProb, train)
	return retVal
}
// Parameter:
// ==========

// NewParameter creates a kind of tensor that is considered as a module parameter.
// Requires-grad defaults to true; pass an explicit bool to override it.
// Ref. https://pytorch.org/docs/stable/generated/torch.nn.parameter.Parameter.html
func NewParameter(path *Path, name string, x *ts.Tensor, requireGradOpt ...bool) *ts.Tensor {
	grad := true
	if len(requireGradOpt) > 0 {
		grad = requireGradOpt[0]
	}
	return path.MustAdd(name, x, grad)
}
// Buffer:
// =======

// NewBuffer creates new buffer.
//
// Buffer is different from Parameter as its requiredGrad always false.
// - `o.Persistent` param. Default=true. If `true` buffer variable will be saved when `nn.VarStore.Save()` is called.
//
// Ref.
// - https://github.com/pytorch/pytorch/blob/f71eede85a69caed637008e331f5ac5f5b7717ae/torch/nn/modules/module.py#L275
// - https://discuss.pytorch.org/t/what-is-the-difference-between-register-buffer-and-register-parameter-of-nn-module/32723/2
func NewBuffer(path *Path, name string, x *ts.Tensor, persistentOpt ...bool) *ts.Tensor {
	keep := true
	if len(persistentOpt) > 0 {
		keep = persistentOpt[0]
	}

	// requiredGrad is hard-coded to false: that is what distinguishes a
	// buffer from a parameter.
	return path.MustAdd(name, x, false, WithPersistent(keep), WithVarType("buffer"))
}
// Identity:
// =========

// Identity is a pass-through module: its Forward returns a shallow
// clone of the input tensor.
type Identity struct{}
// Forward returns a shallow clone of x, or nil when x is nil.
func (m *Identity) Forward(x *ts.Tensor) *ts.Tensor {
	if x != nil {
		return x.MustShallowClone()
	}

	return nil
}
// NewIdentity creates an Identity module.
func NewIdentity() *Identity {
	return &Identity{}
}
// MaxPool2D:
// ==========

// MaxPool2D is a 2D max-pooling module. Its fields are forwarded
// directly to ts.Tensor.MustMaxPool2d in Forward.
type MaxPool2D struct {
	Kernel   []int64 // pooling window size
	Stride   []int64 // step between windows; nil by default (see DefaultMaxPool2DOpts)
	Padding  []int64 // implicit zero padding on both sides
	Dilation []int64 // spacing between window elements
	CeilMode bool    // passed through to MustMaxPool2d
}
// MaxPool2DOpts holds the configurable settings for NewMaxPool2D.
type MaxPool2DOpts struct {
	Stride   []int64
	Padding  []int64
	Dilation []int64
	CeilMode bool
}

// MaxPool2DOpt is a functional option mutating MaxPool2DOpts.
type MaxPool2DOpt func(*MaxPool2DOpts)

// OptStrideMp2D sets the stride of the max-pool layer.
func OptStrideMp2D(v []int64) MaxPool2DOpt {
	return func(cfg *MaxPool2DOpts) { cfg.Stride = v }
}

// OptPaddingMp2D sets the padding of the max-pool layer.
func OptPaddingMp2D(v []int64) MaxPool2DOpt {
	return func(cfg *MaxPool2DOpts) { cfg.Padding = v }
}

// OptDilationMp2D sets the dilation of the max-pool layer.
func OptDilationMp2D(v []int64) MaxPool2DOpt {
	return func(cfg *MaxPool2DOpts) { cfg.Dilation = v }
}

// OptCeilModeMp2D sets the ceil-mode flag of the max-pool layer.
func OptCeilModeMp2D(v bool) MaxPool2DOpt {
	return func(cfg *MaxPool2DOpts) { cfg.CeilMode = v }
}
// DefaultMaxPool2DOpts returns the default settings: no padding,
// dilation of one, and a nil stride (Stride and CeilMode keep their
// zero values).
func DefaultMaxPool2DOpts() *MaxPool2DOpts {
	o := new(MaxPool2DOpts)
	o.Padding = []int64{0, 0}
	o.Dilation = []int64{1, 1}
	return o
}
// NewMaxPool2D creates a MaxPool2D module with the given kernel size.
// Remaining settings start from DefaultMaxPool2DOpts and are adjusted
// by the supplied functional options.
func NewMaxPool2D(kernelSize []int64, opts ...MaxPool2DOpt) *MaxPool2D {
	cfg := DefaultMaxPool2DOpts()
	for _, apply := range opts {
		apply(cfg)
	}

	m := &MaxPool2D{
		Kernel:   kernelSize,
		Stride:   cfg.Stride,
		Padding:  cfg.Padding,
		Dilation: cfg.Dilation,
		CeilMode: cfg.CeilMode,
	}

	return m
}
// Forward applies 2D max pooling over the input tensor using the
// module's kernel, stride, padding, dilation and ceil-mode settings.
func (m *MaxPool2D) Forward(x *ts.Tensor) *ts.Tensor {
	// NOTE(review): the trailing false is presumably gotch's `del` flag
	// (do not consume the input tensor) — confirm against the ts API.
	out := x.MustMaxPool2d(m.Kernel, m.Stride, m.Padding, m.Dilation, m.CeilMode, false)
	return out
}