TabModel.py
# AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/063_models.TabModel.ipynb.

# %% auto 0
__all__ = ['TabModel', 'TabBackbone', 'TabHead']

# %% ../../nbs/063_models.TabModel.ipynb 3
from ..imports import *
# from tsai.data.tabular import *
from .layers import *
from .utils import *

# %% ../../nbs/063_models.TabModel.ipynb 4
class TabModel(Sequential): # Sequential accepts multiple inputs
    "Basic model for tabular data."
    def __init__(self, emb_szs, n_cont, c_out, layers=None, fc_dropout=None, embed_p=0., y_range=None, use_bn=True, bn_final=False, bn_cont=True,
                 lin_first=False, act=nn.ReLU(inplace=True), skip=False):
        # Backbone
        backbone = TabBackbone(emb_szs, n_cont, embed_p=embed_p, bn_cont=bn_cont)
        # Head
        head = TabHead(emb_szs, n_cont, c_out, layers=layers, fc_dropout=fc_dropout, y_range=y_range, use_bn=use_bn, bn_final=bn_final, lin_first=lin_first,
                       act=act, skip=skip)
        super().__init__(OrderedDict([('backbone', backbone), ('head', head)]))


class TabBackbone(Module):
    "Basic backbone for tabular data: one embedding per categorical variable, plus optional batchnorm for continuous variables."
    def __init__(self, emb_szs, n_cont, embed_p=0., bn_cont=True):
        self.embeds = nn.ModuleList([Embedding(ni, nf) for ni,nf in emb_szs])
        self.emb_drop = nn.Dropout(embed_p)
        self.bn_cont = nn.BatchNorm1d(n_cont) if bn_cont else None
        n_emb = sum(e.embedding_dim for e in self.embeds)
        self.n_emb,self.n_cont = n_emb,n_cont

    def forward(self, x_cat, x_cont=None):
        if self.n_emb != 0:
            x = [e(x_cat[:,i]) for i,e in enumerate(self.embeds)]
            x = torch.cat(x, 1)
            x = self.emb_drop(x)
        if self.n_cont != 0:
            if self.bn_cont is not None: x_cont = self.bn_cont(x_cont)
            x = torch.cat([x, x_cont], 1) if self.n_emb != 0 else x_cont
        return x
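
# Note: TabBackbone returns a single flat tensor of width n_emb + n_cont (concatenated
# embeddings followed by the, optionally batch-normalized, continuous features), which is
# the input width TabHead assumes for its first layer.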


class TabHead(Module):
    "Basic head for tabular data."
    def __init__(self, emb_szs, n_cont, c_out, layers=None, fc_dropout=None, y_range=None, use_bn=True, bn_final=False, lin_first=False,
                 act=nn.ReLU(inplace=True), skip=False):
        # Head
        if layers is None: layers = [200,100]
        ps = ifnone(fc_dropout, [0]*len(layers))
        if not is_listy(ps): ps = [ps]*len(layers)
        n_emb = np.sum([emb_dim for _, emb_dim in emb_szs]).astype(int)
        sizes = [n_emb + n_cont] + layers + [c_out]
        actns = [act for _ in range(len(sizes)-2)] + [None]
        _layers = [LinBnDrop(sizes[i], sizes[i+1], bn=use_bn and (i!=len(actns)-1 or bn_final), p=p, act=a, lin_first=lin_first)
                   for i,(p,a) in enumerate(zip(ps+[0.],actns))]
        if y_range is not None: _layers.append(SigmoidRange(*y_range))
        self.head = nn.Sequential(*_layers)
        self.head_nf = layers[-1]
        self.shortcut = nn.Linear(n_emb + n_cont, c_out) if skip else None

    def forward(self, x):
        if self.shortcut is not None: res = x
        x = self.head(x)
        if self.shortcut is not None: x = x + self.shortcut(res)
        return x
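

# Usage sketch (illustrative only; the embedding sizes, batch size and tensor values
# below are hypothetical assumptions, not taken from the exported notebook):
if __name__ == "__main__":
    emb_szs = [(10, 5), (6, 3)]              # (cardinality, embedding dim) per categorical column
    model = TabModel(emb_szs, n_cont=4, c_out=2)
    x_cat = torch.randint(0, 6, (8, 2))      # 8 samples, 2 categorical columns (indices < each cardinality)
    x_cont = torch.randn(8, 4)               # 8 samples, 4 continuous columns
    out = model(x_cat, x_cont)               # backbone concatenates embeddings + continuous, head maps to c_out
    assert out.shape == (8, 2)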