From 563e97730c378ca4d010cfe269ebceff80e3b93d Mon Sep 17 00:00:00 2001
From: Jiayu Liu
Date: Mon, 29 Jun 2020 15:30:30 +0800
Subject: [PATCH] fix some typo within documentation

---
 torch/csrc/jit/codegen/cuda/ir_printer.h          |  2 +-
 torch/nn/modules/linear.py                        |  2 +-
 torch/nn/modules/loss.py                          |  4 ++--
 torch/nn/modules/module.py                        | 10 +++++-----
 torch/nn/quantized/modules/functional_modules.py  |  4 ++--
 5 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/torch/csrc/jit/codegen/cuda/ir_printer.h b/torch/csrc/jit/codegen/cuda/ir_printer.h
index 60c21332edb7e..84f3a2a188add 100644
--- a/torch/csrc/jit/codegen/cuda/ir_printer.h
+++ b/torch/csrc/jit/codegen/cuda/ir_printer.h
@@ -8,7 +8,7 @@
 
 /*
  * IRMathPrinter and IRTransformPrinter allow the splitting up of fusion print
- * functions. IRMathPrinter as its name implies focuses soley on what tensor
+ * functions. IRMathPrinter as its name implies focuses solely on what tensor
  * computations are taking place. Resulting TensorView math will reflect the
  * series of split/merge/computeAts that have taken place, however these
  * nodes will not be displayed in what is printed. IRTransformPrinter does not
diff --git a/torch/nn/modules/linear.py b/torch/nn/modules/linear.py
index 3df7628c93da3..348d42dee58fb 100644
--- a/torch/nn/modules/linear.py
+++ b/torch/nn/modules/linear.py
@@ -96,7 +96,7 @@ def extra_repr(self) -> str:
         )
 
 
-# This class exists soley for Transformer; it has an annotation stating
+# This class exists solely for Transformer; it has an annotation stating
 # that bias is never None, which appeases TorchScript
 class _LinearWithBias(Linear):
     bias: Tensor
diff --git a/torch/nn/modules/loss.py b/torch/nn/modules/loss.py
index 758a77c17ea51..54796e8a497f5 100644
--- a/torch/nn/modules/loss.py
+++ b/torch/nn/modules/loss.py
@@ -474,7 +474,7 @@ class BCELoss(_WeightedLoss):
     However, an infinite term in the loss equation is not desirable for several reasons.
 
     For one, if either :math:`y_n = 0` or :math:`(1 - y_n) = 0`, then we would be
-    multipying 0 with infinity. Secondly, if we have an infinite loss value, then
+    multiplying 0 with infinity. Secondly, if we have an infinite loss value, then
     we would also have an infinite term in our gradient, since
     :math:`\lim_{x\to 0} \frac{d}{dx} \log (x) = \infty`.
     This would make BCELoss's backward method nonlinear with respect to :math:`x_n`,
@@ -1316,7 +1316,7 @@ class CTCLoss(_Loss):
         >>> # Initialize random batch of input vectors, for *size = (T,N,C)
         >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
         >>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long)
-        >>> 
+        >>>
         >>> # Initialize random batch of targets (0 = blank, 1:C = classes)
         >>> target_lengths = torch.randint(low=1, high=T, size=(N,), dtype=torch.long)
         >>> target = torch.randint(low=1, high=C, size=(sum(target_lengths),), dtype=torch.long)
diff --git a/torch/nn/modules/module.py b/torch/nn/modules/module.py
index 8494c490365ca..f5eb0dba44d3b 100644
--- a/torch/nn/modules/module.py
+++ b/torch/nn/modules/module.py
@@ -60,7 +60,7 @@ def register_module_forward_pre_hook(hook: Callable[..., None]) -> RemovableHand
     .. warning ::
 
         This adds global state to the `nn.module` module
-        and it is only intended for debugging/profiling purposes. 
+        and it is only intended for debugging/profiling purposes.
 
     The hook will be called every time before :func:`forward` is invoked.
     It should have the following signature::
@@ -92,7 +92,7 @@ def register_module_forward_hook(hook: Callable[..., None]) -> RemovableHandle:
     .. warning ::
 
         This adds global state to the `nn.module` module
-        and it is only intended for debugging/profiling purposes. 
+        and it is only intended for debugging/profiling purposes.
 
     The hook will be called every time after :func:`forward` has computed an output.
     It should have the following signature::
@@ -124,7 +124,7 @@ def register_module_backward_hook(
     .. warning ::
 
         This adds global state to the `nn.module` module
-        and it is only intended for debugging/profiling purposes. 
+        and it is only intended for debugging/profiling purposes.
 
     The current implementation will not have the presented behavior
     for complex :class:`Module` that perform many operations.
@@ -977,7 +977,7 @@ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                     error_msgs.append('While copying the parameter named "{}", '
                                       'whose dimensions in the model are {} and '
                                       'whose dimensions in the checkpoint are {}, '
-                                      'an exception occured : {}.'
+                                      'an exception occurred : {}.'
                                       .format(key, param.size(), input_param.size(), ex.args))
             elif strict:
                 missing_keys.append(key)
@@ -1329,7 +1329,7 @@ def _get_name(self):
     def extra_repr(self) -> str:
         r"""Set the extra representation of the module
 
-        To print customized extra information, you should reimplement
+        To print customized extra information, you should re-implement
         this method in your own modules. Both single-line and multi-line
         strings are acceptable.
         """
diff --git a/torch/nn/quantized/modules/functional_modules.py b/torch/nn/quantized/modules/functional_modules.py
index eb2902e196aca..d3fa7189e0566 100644
--- a/torch/nn/quantized/modules/functional_modules.py
+++ b/torch/nn/quantized/modules/functional_modules.py
@@ -6,7 +6,7 @@
 
 
 class FloatFunctional(torch.nn.Module):
-    r"""State collector class for float operatitons.
+    r"""State collector class for float operations.
 
     The instance of this class can be used instead of the ``torch.``
     prefix for some operations. See example usage below.
@@ -84,7 +84,7 @@ def add_relu(self, x, y):
 
 
 class QFunctional(torch.nn.Module):
-    r"""Wrapper class for quantized operatitons.
+    r"""Wrapper class for quantized operations.
 
     The instance of this class can be used instead of the
     ``torch.ops.quantized`` prefix. See example usage below.