toy.py
"""Contains toy functions and classes for quick prototyping and
testing.
"""

from functools import partial

from torch import nn


class MLPModule(nn.Module):
    """A simple multi-layer perceptron module.

    This can be adapted for use in different contexts, e.g. binary
    and multi-class classification, regression, etc.

    Parameters
    ----------
    input_units : int (default=20)
        Number of input units.
    output_units : int (default=2)
        Number of output units.
    hidden_units : int (default=10)
        Number of units in hidden layers.
    num_hidden : int (default=1)
        Number of hidden layers.
    nonlin : torch.nn.Module instance (default=torch.nn.ReLU())
        Non-linearity to apply after hidden layers.
    output_nonlin : torch.nn.Module instance or None (default=None)
        Non-linearity to apply after last layer, if any.
    dropout : float (default=0)
        Dropout rate. Dropout is applied between layers.
    squeeze_output : bool (default=False)
        Whether to squeeze output. Squeezing can be helpful if you
        wish your output to be 1-dimensional (e.g. for
        NeuralNetBinaryClassifier).

    """
    def __init__(
            self,
            input_units=20,
            output_units=2,
            hidden_units=10,
            num_hidden=1,
            nonlin=nn.ReLU(),
            output_nonlin=None,
            dropout=0,
            squeeze_output=False,
    ):
        super().__init__()
        self.input_units = input_units
        self.output_units = output_units
        self.hidden_units = hidden_units
        self.num_hidden = num_hidden
        self.nonlin = nonlin
        self.output_nonlin = output_nonlin
        self.dropout = dropout
        self.squeeze_output = squeeze_output

        self.reset_params()

    def reset_params(self):
        """(Re)set all parameters."""
        # layer widths: input -> hidden (repeated) -> output
        units = [self.input_units]
        units += [self.hidden_units] * self.num_hidden
        units += [self.output_units]

        sequence = []
        for u0, u1 in zip(units, units[1:]):
            sequence.append(nn.Linear(u0, u1))
            sequence.append(self.nonlin)
            sequence.append(nn.Dropout(self.dropout))

        # drop the trailing non-linearity and dropout so that the last
        # linear layer's output is returned as-is, unless an output
        # non-linearity was explicitly requested
        sequence = sequence[:-2]
        if self.output_nonlin:
            sequence.append(self.output_nonlin)

        self.sequential = nn.Sequential(*sequence)

    def forward(self, X):  # pylint: disable=arguments-differ
        X = self.sequential(X)
        if self.squeeze_output:
            X = X.squeeze(-1)
        return X
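
# A minimal usage sketch (not part of the original module): with the
# defaults above, MLPModule stacks Linear(20, 10) -> ReLU -> Dropout(0)
# -> Linear(10, 2), so a float tensor of shape (n, 20) maps to (n, 2).
#
#     import torch
#     module = MLPModule(input_units=20, hidden_units=10, output_units=2)
#     out = module(torch.randn(8, 20))  # -> shape (8, 2)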


def make_classifier(output_nonlin=nn.Softmax(dim=-1), **kwargs):
    """Return a multi-layer perceptron to be used with
    NeuralNetClassifier.

    Parameters
    ----------
    input_units : int (default=20)
        Number of input units.
    output_units : int (default=2)
        Number of output units.
    hidden_units : int (default=10)
        Number of units in hidden layers.
    num_hidden : int (default=1)
        Number of hidden layers.
    nonlin : torch.nn.Module instance (default=torch.nn.ReLU())
        Non-linearity to apply after hidden layers.
    dropout : float (default=0)
        Dropout rate. Dropout is applied between layers.

    """
    return partial(MLPModule, output_nonlin=output_nonlin, **kwargs)
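
# A hedged usage sketch (assumption: skorch is installed; X is a
# float32 array of shape (n, 20) and y holds integer class labels).
# Since make_classifier returns a partial, skorch instantiates the
# module itself, and any keyword arguments are forwarded to MLPModule:
#
#     from skorch import NeuralNetClassifier
#     net = NeuralNetClassifier(make_classifier(num_hidden=2))
#     net.fit(X, y)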


def make_binary_classifier(squeeze_output=True, **kwargs):
    """Return a multi-layer perceptron to be used with
    NeuralNetBinaryClassifier.

    Parameters
    ----------
    input_units : int (default=20)
        Number of input units.
    output_units : int (default=2)
        Number of output units.
    hidden_units : int (default=10)
        Number of units in hidden layers.
    num_hidden : int (default=1)
        Number of hidden layers.
    nonlin : torch.nn.Module instance (default=torch.nn.ReLU())
        Non-linearity to apply after hidden layers.
    dropout : float (default=0)
        Dropout rate. Dropout is applied between layers.

    """
    return partial(MLPModule, squeeze_output=squeeze_output, **kwargs)
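
# A hedged usage sketch (assumption: skorch is installed). skorch's
# NeuralNetBinaryClassifier expects one output per sample, so
# output_units=1 is passed explicitly here; with squeeze_output=True
# the (n, 1) output is then squeezed to shape (n,):
#
#     from skorch import NeuralNetBinaryClassifier
#     net = NeuralNetBinaryClassifier(
#         make_binary_classifier(output_units=1))
#     net.fit(X, y)  # y: float32 labels in {0, 1}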


def make_regressor(output_units=1, **kwargs):
    """Return a multi-layer perceptron to be used with
    NeuralNetRegressor.

    Parameters
    ----------
    input_units : int (default=20)
        Number of input units.
    output_units : int (default=1)
        Number of output units.
    hidden_units : int (default=10)
        Number of units in hidden layers.
    num_hidden : int (default=1)
        Number of hidden layers.
    nonlin : torch.nn.Module instance (default=torch.nn.ReLU())
        Non-linearity to apply after hidden layers.
    dropout : float (default=0)
        Dropout rate. Dropout is applied between layers.

    """
    return partial(MLPModule, output_units=output_units, **kwargs)
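
# A hedged usage sketch (assumption: skorch is installed; X is float32
# of shape (n, 20) and y is float32 of shape (n, 1), as skorch's
# NeuralNetRegressor expects a 2-dimensional target):
#
#     from skorch import NeuralNetRegressor
#     net = NeuralNetRegressor(make_regressor(hidden_units=50))
#     net.fit(X, y)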