From 04bbb2b805ae7c7fffaf65cbc7ea7bed71656a54 Mon Sep 17 00:00:00 2001
From: lixinyu
Date: Thu, 20 Aug 2020 10:21:11 -0700
Subject: [PATCH] remove empty override pretty_print

[ghstack-poisoned]
---
 test/cpp/api/transformer.cpp                  | 20 +++++++-------------
 .../torch/nn/modules/transformerlayer.h       |  3 ---
 torch/csrc/api/src/nn/modules/transformer.cpp | 22 ----------------------
 3 files changed, 7 insertions(+), 38 deletions(-)

diff --git a/test/cpp/api/transformer.cpp b/test/cpp/api/transformer.cpp
index 85fdf36b3177..8fdf16c0f243 100644
--- a/test/cpp/api/transformer.cpp
+++ b/test/cpp/api/transformer.cpp
@@ -442,26 +442,20 @@ TEST_F(TransformerTest, TransformerDecoderLayer_gelu_CUDA) {
 TEST_F(TransformerTest, PrettyPrintTransformerDecoderLayer) {
   ASSERT_EQ(
       c10::str(TransformerDecoderLayer(4, 2)),
-      "(\n"
+      "torch::nn::TransformerDecoderLayerImpl(\n"
       "  (self_attn): torch::nn::MultiheadAttention(\n"
-      "    (out_proj): torch::nn::Linear("
-      "in_features=4, out_features=4, bias=true)\n"
+      "    (out_proj): torch::nn::Linear(in_features=4, out_features=4, bias=true)\n"
       "  )\n"
       "  (dropout1): torch::nn::Dropout(p=0.1, inplace=false)\n"
-      "  (norm1): torch::nn::LayerNorm([4], eps=1e-05,"
-      " elementwise_affine=true)\n"
+      "  (norm1): torch::nn::LayerNorm([4], eps=1e-05, elementwise_affine=true)\n"
       "  (multihead_attn): torch::nn::MultiheadAttention(\n"
-      "    (out_proj): torch::nn::Linear("
-      "in_features=4, out_features=4, bias=true)\n"
+      "    (out_proj): torch::nn::Linear(in_features=4, out_features=4, bias=true)\n"
       "  )\n"
       "  (dropout2): torch::nn::Dropout(p=0.1, inplace=false)\n"
-      "  (norm2): torch::nn::LayerNorm([4], eps=1e-05, "
-      "elementwise_affine=true)\n"
-      "  (linear1): torch::nn::Linear("
-      "in_features=4, out_features=2048, bias=true)\n"
+      "  (norm2): torch::nn::LayerNorm([4], eps=1e-05, elementwise_affine=true)\n"
+      "  (linear1): torch::nn::Linear(in_features=4, out_features=2048, bias=true)\n"
       "  (dropout): torch::nn::Dropout(p=0.1, inplace=false)\n"
-      "  (linear2): torch::nn::Linear("
-      "in_features=2048, out_features=4, bias=true)\n"
+      "  (linear2): torch::nn::Linear(in_features=2048, out_features=4, bias=true)\n"
       "  (dropout3): torch::nn::Dropout(p=0.1, inplace=false)\n"
       "  (norm3): torch::nn::LayerNorm([4], eps=1e-05, elementwise_affine=true)\n"
       ")");
diff --git a/torch/csrc/api/include/torch/nn/modules/transformerlayer.h b/torch/csrc/api/include/torch/nn/modules/transformerlayer.h
index aad9cb924c88..db003c224552 100644
--- a/torch/csrc/api/include/torch/nn/modules/transformerlayer.h
+++ b/torch/csrc/api/include/torch/nn/modules/transformerlayer.h
@@ -114,9 +114,6 @@ class TORCH_API TransformerDecoderLayerImpl : public Cloneable<TransformerDecoderLayerImpl> {
   void reset() override;
 
   void reset_parameters();
 
-  /// Pretty prints the `TransformerDecoderLayer` module into the given `stream`.
-  void pretty_print(std::ostream& stream) const override;
-
   /// Pass the inputs (and mask) through the decoder layer.
   Tensor forward(
diff --git a/torch/csrc/api/src/nn/modules/transformer.cpp b/torch/csrc/api/src/nn/modules/transformer.cpp
--- a/torch/csrc/api/src/nn/modules/transformer.cpp
+++ b/torch/csrc/api/src/nn/modules/transformer.cpp
@@ -368,28 +368,6 @@ void TransformerDecoderLayerImpl::reset() {
   reset_parameters();
 }
 
-// By default contained sub-modules are printed.
-// TODO: The pretty print needs to be implemented along with python
-// implementation, currently using the default print.
-void TransformerDecoderLayerImpl::pretty_print(std::ostream& stream) const {
-  // stream << "(d_model=" << options.d_model()
-  //        << ", nhead=" << options.nhead()
-  //        << ", dim_feedforward=" << options.dim_feedforward()
-  //        << ", dropout=" << options.dropout()
-  //        << ", self_attn=torch::nn::MultiheadAttention()"
-  //        << ", dropout1=torch::nn::Dropout()"
-  //        << ", norm1=torch::nn::LayerNorm()"
-  //        << ", multihead_attn=torch::nn::MultiheadAttention()"
-  //        << ", dropout2=torch::nn::Dropout()"
-  //        << ", norm2=torch::nn::LayerNorm()"
-  //        << ", linear1=torch::nn::Linear()"
-  //        << ", dropout=torch::nn::Dropout()"
-  //        << ", linear2=torch::nn::Linear()"
-  //        << ", dropout3=torch::nn::Dropout()"
-  //        << ", norm3=torch::nn::LayerNorm()"
-  //        << ")";
-}
-
 /// Pass the inputs (and mask) through the decoder layer.
 Tensor TransformerDecoderLayerImpl::forward(Tensor tgt, const Tensor& memory,
     const Tensor& tgt_mask,
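
Not part of the patch: with the empty pretty_print override removed, streaming the
module falls back to torch::nn::Module's default pretty-printer, which emits the
demangled class name followed by each registered submodule -- exactly the string the
updated test now expects ("torch::nn::TransformerDecoderLayerImpl(" instead of "(").
A minimal standalone sketch of that fallback behavior; ToyImpl is a hypothetical
module used only for illustration:

  #include <torch/torch.h>
  #include <iostream>

  // Hypothetical module with no pretty_print override; relies on the
  // default printer inherited from torch::nn::Module.
  struct ToyImpl : torch::nn::Module {
    ToyImpl() {
      linear = register_module("linear", torch::nn::Linear(4, 4));
      drop = register_module("drop", torch::nn::Dropout(0.1));
    }
    torch::nn::Linear linear{nullptr};
    torch::nn::Dropout drop{nullptr};
  };
  TORCH_MODULE(Toy);

  int main() {
    // Prints the class name, then each registered child, e.g.:
    //   ToyImpl(
    //     (linear): torch::nn::Linear(in_features=4, out_features=4, bias=true)
    //     (drop): torch::nn::Dropout(p=0.1, inplace=false)
    //   )
    std::cout << Toy() << std::endl;
  }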