diff --git a/onmt/modules/Embeddings.py b/onmt/modules/Embeddings.py
index 83f250f..0a74410 100644
--- a/onmt/modules/Embeddings.py
+++ b/onmt/modules/Embeddings.py
@@ -2,7 +2,7 @@
 import torch.nn as nn
 from torch.autograd import Variable
 
-from onmt.modules import BottleLinear, Elementwise
+from onmt.modules import Elementwise
 from onmt.Utils import aeq
 
 
@@ -138,7 +138,7 @@ def __init__(self, word_vec_size,
         if feat_merge == 'mlp':
             in_dim = sum(emb_dims)
             out_dim = word_vec_size
-            mlp = nn.Sequential(BottleLinear(in_dim, out_dim), nn.ReLU())
+            mlp = nn.Sequential(nn.Linear(in_dim, out_dim), nn.ReLU())
             self.make_embedding.add_module('mlp', mlp)
 
         if position_encoding:
diff --git a/onmt/modules/MultiHeadedAttn.py b/onmt/modules/MultiHeadedAttn.py
index 6effc65..6e8dfc7 100644
--- a/onmt/modules/MultiHeadedAttn.py
+++ b/onmt/modules/MultiHeadedAttn.py
@@ -4,7 +4,6 @@
 from torch.autograd import Variable
 
 from onmt.Utils import aeq
-from onmt.modules.UtilClass import BottleLinear, BottleSoftmax
 
 
 class MultiHeadedAttention(nn.Module):
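
Note (not part of the patch): BottleLinear was a thin wrapper that flattened a 3-D (batch, len, dim) input to 2-D before a plain nn.Linear and reshaped the result back. Because nn.Linear already applies to the last dimension of an N-D input, the wrapper is redundant and nn.Linear can be dropped in directly, which is what the Embeddings.py hunk does. A minimal sketch of the equivalence, written against the current PyTorch API rather than the Variable-era API shown in the diff:

# Illustrative only; dimensions are made up for the example.
import torch
import torch.nn as nn

linear = nn.Linear(20, 10)
x = torch.randn(5, 7, 20)   # (batch, seq_len, in_dim)
y = linear(x)               # applied position-wise over the last dimension
assert y.shape == (5, 7, 10)  # no manual flatten/unflatten needed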