diff --git a/torch/nn/modules/loss.py b/torch/nn/modules/loss.py
index 637e7a7d457e5..75e42303c2703 100644
--- a/torch/nn/modules/loss.py
+++ b/torch/nn/modules/loss.py
@@ -447,8 +447,6 @@ class KLDivLoss(_Loss):
           same shape as the input.
 
     Examples::
-
-        >>> import torch.nn.functional as F
         >>> kl_loss = nn.KLDivLoss(reduction="batchmean")
        >>> # input should be a distribution in the log space
         >>> input = F.log_softmax(torch.randn(3, 5, requires_grad=True), dim=1)
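
Note: the hunk above removes a stray blank line and the in-example `import torch.nn.functional as F` from the KLDivLoss docstring. For context, here is a minimal standalone sketch of that example; the explicit imports and the probability-space `target` are assumptions added only so the snippet runs outside the docstring, not part of this change.

    # Standalone sketch of the docstring example touched by this hunk.
    # The imports and the softmax-normalized `target` are assumptions
    # added so the snippet is runnable on its own.
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    kl_loss = nn.KLDivLoss(reduction="batchmean")
    # input should be a distribution in the log space
    input = F.log_softmax(torch.randn(3, 5, requires_grad=True), dim=1)
    # target is assumed here to be a distribution in probability space
    target = F.softmax(torch.rand(3, 5), dim=1)
    output = kl_loss(input, target)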