torch_module.py
"""Implement torch module."""
from __future__ import annotations
import torch
from torch import nn
class _LinearRegressionTorchModel(nn.Module):
"""A Torch module with one linear layer."""
def __init__(
self,
input_size: int,
output_size: int,
use_bias: bool = True,
):
"""Initialize the module.
Args:
input_size (int): Size of each input sample.
output_size (int): Size of each output sample.
use_bias (bool): If set to False, the linear layer will not learn a bias term.
Default to True.
"""
super().__init__()
self.linear = nn.Linear(input_size, output_size, bias=use_bias)
def forward(self, x: torch.Tensor):
"""Compute a linear inference.
Args:
x (torch.tensor): The input data.
Returns:
torch.Tensor: The predictions.
"""
return self.linear(x)
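

# A minimal usage sketch (not part of the original file): the input size, output
# size, and random data below are illustrative assumptions, not values taken from
# the library.
#
#     model = _LinearRegressionTorchModel(input_size=4, output_size=1)
#     x = torch.randn(8, 4)
#     y_pred = model(x)  # shape (8, 1), computed by the single nn.Linear layer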


class _CustomLinearRegressionTorchModel(nn.Module):
    """A Torch module with only one custom linear layer.

    This module is used for applying the ReduceSum workaround to linear models.
    """

    def __init__(
        self,
        weights,
        bias=0.0,
    ):
        """Initialize the module.

        Args:
            weights (torch.Tensor): The weights learned by sklearn to consider during the
                inference.
            bias (Optional[torch.Tensor]): The bias term learned by sklearn to consider during
                the inference, or 0.0 if no bias has been learned. Defaults to 0.0.
        """
        super().__init__()

        self.weights = weights
        self.bias = bias

    def forward(self, x: torch.Tensor):
        """Compute the inference y = X @ w + b.

        Args:
            x (torch.Tensor): The input data.

        Returns:
            torch.Tensor: The predictions.
        """
        # Element-wise multiply and reduce over the feature dimension, which is
        # equivalent to the matrix product X @ w.
        y_pred = x * self.weights
        y_pred = y_pred.sum(dim=1, keepdim=True)
        y_pred += self.bias
        return y_pred
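

# A minimal sketch of the ReduceSum-style inference above (not part of the
# original file): the weights, bias, and input values are illustrative
# assumptions only.
if __name__ == "__main__":
    # Weights shaped like sklearn's coef_ for a single-output linear model.
    weights = torch.tensor([[0.5, -1.0, 2.0]])
    bias = torch.tensor([0.25])

    model = _CustomLinearRegressionTorchModel(weights, bias)

    x = torch.randn(4, 3)

    # Both paths compute y = X @ w + b; the custom module does it with an
    # element-wise product followed by a sum (ReduceSum workaround).
    y_custom = model(x)
    y_matmul = x @ weights.T + bias

    assert torch.allclose(y_custom, y_matmul)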