-
Notifications
You must be signed in to change notification settings - Fork 1
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
ベストsubのネットワーク構造をinput3層にした場合を試す #17
Labels
experiment
experiment
Comments
実験1: Local Score 今回: 0.015264159755005106 / LB Score 今回: 0.01927。Local CVとLBの相関はありそう。モデル構造: hidden_size_list = [
[872,512,512,256,128], # genes
[130,128,128,64,32], # cells
[1007,512,512,256,128], # all
[288,256,256] # last
]
class MultiInputTabularNN(nn.Module):
    """Three-branch MLP for tabular data (experiment 1).

    Separate stacks process the gene features, the cell features and the
    combined ("all" = genes + cells + meta) features; their outputs are
    concatenated and passed through a fully-connected head.

    ``cfg`` must provide:
        hidden_size_list: four lists of layer widths —
            [0] genes branch, [1] cells branch, [2] all branch, [3] fc head.
            The fc head's first width must equal the sum of the three
            branches' final widths (their concatenated output size).
        dropout: dropout probability applied before every Linear layer.
        target_cols: sequence of target names; its length is the output size.
    """

    def __init__(self, cfg):
        super().__init__()
        # One identical stack per feature group (was four copy-pasted loops).
        self.genes_block = self._make_block(cfg.hidden_size_list[0], cfg.dropout)
        self.cells_block = self._make_block(cfg.hidden_size_list[1], cfg.dropout)
        self.all_block = self._make_block(cfg.hidden_size_list[2], cfg.dropout)
        # FC head: same stack plus a final projection onto the targets.
        # Flattened into one Sequential so state_dict keys (fc_block.0, ...)
        # match the original hand-written version.
        fc_sizes = cfg.hidden_size_list[3]
        fc_layers = list(self._make_block(fc_sizes, cfg.dropout))
        fc_layers.append(nn.BatchNorm1d(fc_sizes[-1]))
        fc_layers.append(nn.Dropout(cfg.dropout))
        fc_layers.append(nn.utils.weight_norm(nn.Linear(fc_sizes[-1], len(cfg.target_cols))))
        self.fc_block = nn.Sequential(*fc_layers)

    @staticmethod
    def _make_block(sizes, dropout):
        """BatchNorm1d -> Dropout -> weight-normed Linear -> ELU for each
        consecutive pair of widths in ``sizes``."""
        layers = []
        for in_size, out_size in zip(sizes[:-1], sizes[1:]):
            layers.append(nn.BatchNorm1d(in_size))
            layers.append(nn.Dropout(dropout))
            layers.append(nn.utils.weight_norm(nn.Linear(in_size, out_size)))
            layers.append(nn.ELU())
        return nn.Sequential(*layers)

    def forward(self, input_gene, input_cell, input_all):
        """Run all three branches and the fused FC head.

        Returns a (batch, len(cfg.target_cols)) tensor of logits.
        """
        x1 = self.genes_block(input_gene)
        x2 = self.cells_block(input_cell)
        x3 = self.all_block(input_all)
        # Concatenate branch outputs along the feature dimension.
        x = torch.cat([x1, x2, x3], dim=1)
        return self.fc_block(x)
実験2: Local Score 今回: 0.015213956350176748 / LB Score 今回: 0.01924。モデル構造: hidden_size_list = [
[872,512,512,256,256], # genes
[256+130,256,256,128,128], # cells
[1007,512,512,256,256], # all
[384,256,256] # last
]
class MultiInputTabularNN(nn.Module):
    """Experiment-2 variant of the three-branch tabular MLP.

    Difference from experiment 1: the gene branch's output is concatenated
    with the raw cell features before the cells branch, so the cells stack
    conditions on the learned gene representation. Only the cells and all
    branch outputs feed the FC head.

    The original note abbreviated ``__init__`` with ``...`` ("same structure
    as experiment 1"); it is filled in here per that statement. ``cfg`` must
    provide hidden_size_list (four width lists), dropout and target_cols as
    in experiment 1; hidden_size_list[1][0] must equal
    hidden_size_list[0][-1] + n_cell_features, and hidden_size_list[3][0]
    must equal the sum of the cells and all branches' final widths.
    """

    def __init__(self, cfg):
        super().__init__()
        # Same structure as experiment 1 (was abbreviated with "...").
        self.genes_block = self._make_block(cfg.hidden_size_list[0], cfg.dropout)
        self.cells_block = self._make_block(cfg.hidden_size_list[1], cfg.dropout)
        self.all_block = self._make_block(cfg.hidden_size_list[2], cfg.dropout)
        fc_sizes = cfg.hidden_size_list[3]
        fc_layers = list(self._make_block(fc_sizes, cfg.dropout))
        fc_layers.append(nn.BatchNorm1d(fc_sizes[-1]))
        fc_layers.append(nn.Dropout(cfg.dropout))
        fc_layers.append(nn.utils.weight_norm(nn.Linear(fc_sizes[-1], len(cfg.target_cols))))
        self.fc_block = nn.Sequential(*fc_layers)

    @staticmethod
    def _make_block(sizes, dropout):
        """BatchNorm1d -> Dropout -> weight-normed Linear -> ELU for each
        consecutive pair of widths in ``sizes``."""
        layers = []
        for in_size, out_size in zip(sizes[:-1], sizes[1:]):
            layers.append(nn.BatchNorm1d(in_size))
            layers.append(nn.Dropout(dropout))
            layers.append(nn.utils.weight_norm(nn.Linear(in_size, out_size)))
            layers.append(nn.ELU())
        return nn.Sequential(*layers)

    def forward(self, input_gene, input_cell, input_all):
        """Run the branches, feeding the gene output into the cells branch.

        Returns a (batch, len(cfg.target_cols)) tensor of logits.
        """
        x1 = self.genes_block(input_gene)
        # Cells branch sees the gene representation alongside raw cell features.
        input_cell_x1 = torch.cat([x1, input_cell], dim=1)
        x2 = self.cells_block(input_cell_x1)
        x3 = self.all_block(input_all)
        # Note: x1 is NOT concatenated into the head here (unlike experiment 1).
        x = torch.cat([x2, x3], dim=1)
        return self.fc_block(x)
実験3

実験まとめ
Open
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
ToDo
モデル
こちらのsubで実験したモデルをベースにした
モデルの特徴
The text was updated successfully, but these errors were encountered: