Skip to content
Permalink
Browse files

added documentation to the deepsea model class

  • Loading branch information...
kathyxchen committed Sep 28, 2018
1 parent 8b4f597 commit ef18ab20a47295e268894fcef966bc2b1b56a19d
Showing with 15 additions and 14 deletions.
  1. +15 −14 models/deepsea.py
@@ -1,23 +1,18 @@
"""DeepSEA architecture (Zhou & Troyanskaya, 2015).
"""
DeepSEA architecture (Zhou & Troyanskaya, 2015).
"""
import numpy as np
import torch
import torch.nn as nn


class DeepSEA(nn.Module):
def __init__(self, sequence_length, n_genomic_features):
    """
    Build the DeepSEA convolutional network.

    NOTE(review): this block was reconstructed from diff residue in which
    pre-commit (``window_size``) and post-commit (``sequence_length``)
    lines were interleaved; the post-commit version is kept, and the
    hunk arithmetic indicates the three conv-stack BatchNorm1d lines
    were deletions. A few unchanged lines that fell between diff hunks
    (``pool_kernel_size = 4`` and the first ``nn.Conv1d`` layer) are
    restored from the surrounding structure -- confirm against the
    repository.

    Parameters
    ----------
    sequence_length : int
        Length of the input sequence.
    n_genomic_features : int
        Number of genomic features to predict.

    Attributes
    ----------
    conv_net : torch.nn.Sequential
        Three convolutional blocks; the first two are followed by
        max-pooling, all three by dropout.
    n_channels : int
        Temporal length of the conv_net output for the given
        sequence_length.
    classifier : torch.nn.Sequential
        Fully connected head ending in a sigmoid (multi-label output).
    """
    super(DeepSEA, self).__init__()
    conv_kernel_size = 8
    pool_kernel_size = 4

    self.conv_net = nn.Sequential(
        nn.Conv1d(4, 320, kernel_size=conv_kernel_size),
        nn.ReLU(inplace=True),
        nn.MaxPool1d(
            kernel_size=pool_kernel_size, stride=pool_kernel_size),
        nn.Dropout(p=0.2),

        nn.Conv1d(320, 480, kernel_size=conv_kernel_size),
        nn.ReLU(inplace=True),
        nn.MaxPool1d(
            kernel_size=pool_kernel_size, stride=pool_kernel_size),
        nn.Dropout(p=0.2),

        nn.Conv1d(480, 960, kernel_size=conv_kernel_size),
        nn.ReLU(inplace=True),
        nn.Dropout(p=0.5))

    # Each conv layer shortens the sequence by (kernel_size - 1); each
    # max-pool then divides the length by pool_kernel_size (floored).
    reduce_by = conv_kernel_size - 1
    pool_kernel_size = float(pool_kernel_size)
    self.n_channels = int(
        np.floor(
            (np.floor(
                (sequence_length - reduce_by) / pool_kernel_size)
             - reduce_by) / pool_kernel_size)
        - reduce_by)
    self.classifier = nn.Sequential(
        nn.Linear(960 * self.n_channels, n_genomic_features),
        nn.ReLU(inplace=True),
        nn.BatchNorm1d(n_genomic_features),
        nn.Linear(n_genomic_features, n_genomic_features),
        nn.Sigmoid())

@@ -66,8 +58,17 @@ def forward(self, x):
return predict

def criterion():
    """
    Return the loss function that training minimizes.

    Returns
    -------
    torch.nn.BCELoss
        Binary cross-entropy, paired with the model's sigmoid output.
    """
    bce = nn.BCELoss()
    return bce

def get_optimizer(lr):
    """
    Return the optimizer class and its initialization arguments.

    The optimizer itself is constructed later, once the model exists,
    because it must also receive ``model.parameters()``.

    Parameters
    ----------
    lr : float
        The learning rate to use.

    Returns
    -------
    tuple
        A ``(torch.optim.SGD, kwargs)`` pair, where ``kwargs`` holds
        the learning rate, weight decay, and momentum settings.
    """
    optimizer_class = torch.optim.SGD
    optimizer_kwargs = {"lr": lr, "weight_decay": 1e-6, "momentum": 0.9}
    return (optimizer_class, optimizer_kwargs)

0 comments on commit ef18ab2

Please sign in to comment.
You can’t perform that action at this time.