diff --git a/docs/source/tensors.rst b/docs/source/tensors.rst index 85e325f2a7b4..7aa7c6078aaf 100644 --- a/docs/source/tensors.rst +++ b/docs/source/tensors.rst @@ -1,5 +1,7 @@ .. currentmodule:: torch +.. _tensor-doc: + torch.Tensor =================================== diff --git a/torch/_tensor_docs.py b/torch/_tensor_docs.py index cf5a2242cda5..7696704665b5 100644 --- a/torch/_tensor_docs.py +++ b/torch/_tensor_docs.py @@ -25,147 +25,147 @@ def add_docstr_all(method, docstr): add_docstr_all('abs', - """ + r""" abs() -> Tensor See :func:`torch.abs` """) add_docstr_all('abs_', - """ + r""" abs_() -> Tensor In-place version of :meth:`~Tensor.abs` """) add_docstr_all('acos', - """ + r""" acos() -> Tensor See :func:`torch.acos` """) add_docstr_all('acos_', - """ + r""" acos_() -> Tensor In-place version of :meth:`~Tensor.acos` """) add_docstr_all('add', - """ + r""" add(value) See :func:`torch.add` """) add_docstr_all('add_', - """ + r""" add_(value) In-place version of :meth:`~Tensor.add` """) add_docstr_all('addbmm', - """ + r""" addbmm(beta=1, mat, alpha=1, batch1, batch2) -> Tensor See :func:`torch.addbmm` """) add_docstr_all('addbmm_', - """ + r""" addbmm_(beta=1, mat, alpha=1, batch1, batch2) -> Tensor In-place version of :meth:`~Tensor.addbmm` """) add_docstr_all('addcdiv', - """ + r""" addcdiv(value=1, tensor1, tensor2) -> Tensor See :func:`torch.addcdiv` """) add_docstr_all('addcdiv_', - """ + r""" addcdiv_(value=1, tensor1, tensor2) -> Tensor In-place version of :meth:`~Tensor.addcdiv` """) add_docstr_all('addcmul', - """ + r""" addcmul(value=1, tensor1, tensor2) -> Tensor See :func:`torch.addcmul` """) add_docstr_all('addcmul_', - """ + r""" addcmul_(value=1, tensor1, tensor2) -> Tensor In-place version of :meth:`~Tensor.addcmul` """) add_docstr_all('addmm', - """ + r""" addmm(beta=1, mat, alpha=1, mat1, mat2) -> Tensor See :func:`torch.addmm` """) add_docstr_all('addmm_', - """ + r""" addmm_(beta=1, mat, alpha=1, mat1, mat2) -> Tensor In-place version of :meth:`~Tensor.addmm` """) add_docstr_all('addmv', - """ + r""" addmv(beta=1, tensor, alpha=1, mat, vec) -> Tensor See :func:`torch.addmv` """) add_docstr_all('addmv_', - """ + r""" addmv_(beta=1, tensor, alpha=1, mat, vec) -> Tensor In-place version of :meth:`~Tensor.addmv` """) add_docstr_all('addr', - """ + r""" addr(beta=1, alpha=1, vec1, vec2) -> Tensor See :func:`torch.addr` """) add_docstr_all('addr_', - """ + r""" addr_(beta=1, alpha=1, vec1, vec2) -> Tensor In-place version of :meth:`~Tensor.addr` """) add_docstr_all('all', - """ + r""" all() -> bool Returns True if all elements in the tensor are non-zero, False otherwise. """) add_docstr_all('any', - """ + r""" any() -> bool Returns True if any elements in the tensor are non-zero, False otherwise. 
""") add_docstr_all('apply_', - """ + r""" apply_(callable) -> Tensor Applies the function :attr:`callable` to each element in the tensor, replacing @@ -178,84 +178,84 @@ def add_docstr_all(method, docstr): """) add_docstr_all('asin', - """ + r""" asin() -> Tensor See :func:`torch.asin` """) add_docstr_all('asin_', - """ + r""" asin_() -> Tensor In-place version of :meth:`~Tensor.asin` """) add_docstr_all('atan', - """ + r""" atan() -> Tensor See :func:`torch.atan` """) add_docstr_all('atan2', - """ + r""" atan2(other) -> Tensor See :func:`torch.atan2` """) add_docstr_all('atan2_', - """ + r""" atan2_(other) -> Tensor In-place version of :meth:`~Tensor.atan2` """) add_docstr_all('atan_', - """ + r""" atan_() -> Tensor In-place version of :meth:`~Tensor.atan` """) add_docstr_all('baddbmm', - """ + r""" baddbmm(beta=1, alpha=1, batch1, batch2) -> Tensor See :func:`torch.baddbmm` """) add_docstr_all('baddbmm_', - """ + r""" baddbmm_(beta=1, alpha=1, batch1, batch2) -> Tensor In-place version of :meth:`~Tensor.baddbmm` """) add_docstr_all('bernoulli', - """ + r""" bernoulli() -> Tensor See :func:`torch.bernoulli` """) add_docstr_all('bernoulli_', - """ + r""" bernoulli_() -> Tensor In-place version of :meth:`~Tensor.bernoulli` """) add_docstr_all('bmm', - """ + r""" bmm(batch2) -> Tensor See :func:`torch.bmm` """) add_docstr_all('cauchy_', - """ + r""" cauchy_(median=0, sigma=1, *, generator=None) -> Tensor Fills the tensor with numbers drawn from the Cauchy distribution: @@ -266,176 +266,179 @@ def add_docstr_all(method, docstr): """) add_docstr_all('ceil', - """ + r""" ceil() -> Tensor See :func:`torch.ceil` """) add_docstr_all('ceil_', - """ + r""" ceil_() -> Tensor In-place version of :meth:`~Tensor.ceil` """) add_docstr_all('clamp', - """ + r""" clamp(min, max) -> Tensor See :func:`torch.clamp` """) add_docstr_all('clamp_', - """ + r""" clamp_(min, max) -> Tensor In-place version of :meth:`~Tensor.clamp` """) add_docstr_all('clone', - """ + r""" clone() -> Tensor -Returns a copy of the tensor. The copy has the same size and data type as the -original tensor. +Returns a copy of the :attr:`self` tensor. The copy has the same size and data +type as :attr:`self`. """) add_docstr_all('contiguous', - """ + r""" contiguous() -> Tensor -Returns a contiguous Tensor containing the same data as this tensor. If this -tensor is contiguous, this function returns the original tensor. +Returns a contiguous tensor containing the same data as :attr:`self` tensor. If +:attr:`self` tensor is contiguous, this function returns the :attr:`self` +tensor. """) add_docstr_all('copy_', - """ + r""" copy_(src, async=False, broadcast=True) -> Tensor -Copies the elements from :attr:`src` into this tensor and returns this tensor. +Copies the elements from :attr:`src` into :attr:`self` tensor and returns +:attr:`self`. -If :attr:`broadcast` is True, the source tensor must be -:ref:`broadcastable ` with this tensor. Otherwise, -source tensor should have the same number of elements as this tensor. -It may be of a different data type or reside on a different device. +If :attr:`broadcast` is True, the :attr:`src` tensor must be +:ref:`broadcastable ` with :attr:`self` tensor. +Otherwise, :attr:`src` tensor should have the same number of elements as +:attr:`self` tensor. It may be of a different data type or reside on a +different device. Args: - src (Tensor): Source tensor to copy - async (bool): If ``True`` and this copy is between CPU and GPU, then the copy - may occur asynchronously with respect to the host. 
For other - copies, this argument has no effect. - broadcast (bool): If ``True``, :attr:`src` will be broadcast to the shape of + src (Tensor): the source tensor to copy from + async (bool): if ``True`` and this copy is between CPU and GPU, the copy may + occur asynchronously with respect to the host. For other cases, this + argument has no effect. + broadcast (bool): if ``True``, :attr:`src` will be broadcast to the shape of the underlying tensor. """) add_docstr_all('cos', - """ + r""" cos() -> Tensor See :func:`torch.cos` """) add_docstr_all('cos_', - """ + r""" cos_() -> Tensor In-place version of :meth:`~Tensor.cos` """) add_docstr_all('cosh', - """ + r""" cosh() -> Tensor See :func:`torch.cosh` """) add_docstr_all('cosh_', - """ + r""" cosh_() -> Tensor In-place version of :meth:`~Tensor.cosh` """) add_docstr_all('cross', - """ + r""" cross(other, dim=-1) -> Tensor See :func:`torch.cross` """) add_docstr_all('cumprod', - """ + r""" cumprod(dim) -> Tensor See :func:`torch.cumprod` """) add_docstr_all('cumsum', - """ + r""" cumsum(dim) -> Tensor See :func:`torch.cumsum` """) add_docstr_all('data_ptr', - """ + r""" data_ptr() -> int -Returns the address of the first element of this tensor. +Returns the address of the first element of :attr:`self` tensor. """) add_docstr_all('diag', - """ + r""" diag(diagonal=0) -> Tensor See :func:`torch.diag` """) add_docstr_all('dim', - """ + r""" dim() -> int -Returns the number of dimensions of this tensor. +Returns the number of dimensions of :attr:`self` tensor. """) add_docstr_all('dist', - """ + r""" dist(other, p=2) -> float See :func:`torch.dist` """) add_docstr_all('div', - """ + r""" div(value) See :func:`torch.div` """) add_docstr_all('div_', - """ + r""" div_(value) In-place version of :meth:`~Tensor.div` """) add_docstr_all('dot', - """ + r""" dot(tensor2) -> float See :func:`torch.dot` """) add_docstr_all('eig', - """ + r""" eig(eigenvectors=False) -> (Tensor, Tensor) See :func:`torch.eig` """) add_docstr_all('element_size', - """ + r""" element_size() -> int Returns the size in bytes of an individual element. @@ -448,59 +451,59 @@ def add_docstr_all(method, docstr): """) add_docstr_all('eq', - """ + r""" eq(other) -> Tensor See :func:`torch.eq` """) add_docstr_all('eq_', - """ + r""" eq_(other) -> Tensor In-place version of :meth:`~Tensor.eq` """) add_docstr_all('equal', - """ + r""" equal(other) -> bool See :func:`torch.equal` """) add_docstr_all('erf', - """ + r""" erf() -> Tensor See :func:`torch.erf` """) add_docstr_all('erfinv', - """ + r""" erfinv() -> Tensor See :func:`torch.erfinv` """) add_docstr_all('exp', - """ + r""" exp() -> Tensor See :func:`torch.exp` """) add_docstr_all('exp_', - """ + r""" exp_() -> Tensor In-place version of :meth:`~Tensor.exp` """) add_docstr_all('exponential_', - """ + r""" exponential_(lambd=1, *, generator=None) -> Tensor -Fills this tensor with elements drawn from the exponential distribution: +Fills :attr:`self` tensor with elements drawn from the exponential distribution: .. math:: @@ -508,87 +511,87 @@ def add_docstr_all(method, docstr): """) add_docstr_all('fill_', - """ + r""" fill_(value) -> Tensor -Fills this tensor with the specified value. +Fills :attr:`self` tensor with the specified value. 
""") add_docstr_all('floor', - """ + r""" floor() -> Tensor See :func:`torch.floor` """) add_docstr_all('floor_', - """ + r""" floor_() -> Tensor In-place version of :meth:`~Tensor.floor` """) add_docstr_all('fmod', - """ + r""" fmod(divisor) -> Tensor See :func:`torch.fmod` """) add_docstr_all('fmod_', - """ + r""" fmod_(divisor) -> Tensor In-place version of :meth:`~Tensor.fmod` """) add_docstr_all('frac', - """ + r""" frac() -> Tensor See :func:`torch.frac` """) add_docstr_all('frac_', - """ + r""" frac_() -> Tensor In-place version of :meth:`~Tensor.frac` """) add_docstr_all('gather', - """ + r""" gather(dim, index) -> Tensor See :func:`torch.gather` """) add_docstr_all('ge', - """ + r""" ge(other) -> Tensor See :func:`torch.ge` """) add_docstr_all('ge_', - """ + r""" ge_(other) -> Tensor In-place version of :meth:`~Tensor.ge` """) add_docstr_all('gels', - """ + r""" gels(A) -> Tensor See :func:`torch.gels` """) add_docstr_all('geometric_', - """ + r""" geometric_(p, *, generator=None) -> Tensor -Fills this tensor with elements drawn from the geometric distribution: +Fills :attr:`self` tensor with elements drawn from the geometric distribution: .. math:: @@ -597,70 +600,70 @@ def add_docstr_all(method, docstr): """) add_docstr_all('geqrf', - """ + r""" geqrf() -> (Tensor, Tensor) See :func:`torch.geqrf` """) add_docstr_all('ger', - """ + r""" ger(vec2) -> Tensor See :func:`torch.ger` """) add_docstr_all('gesv', - """ + r""" gesv(A) -> Tensor, Tensor See :func:`torch.gesv` """) add_docstr_all('gt', - """ + r""" gt(other) -> Tensor See :func:`torch.gt` """) add_docstr_all('gt_', - """ + r""" gt_(other) -> Tensor In-place version of :meth:`~Tensor.gt` """) add_docstr_all('histc', - """ + r""" histc(bins=100, min=0, max=0) -> Tensor See :func:`torch.histc` """) add_docstr_all('index', - """ + r""" index(m) -> Tensor -Selects elements from this tensor using a binary mask or along a given +Selects elements from :attr:`self` tensor using a binary mask or along a given dimension. The expression ``tensor.index(m)`` is equivalent to ``tensor[m]``. Args: - m (int or ByteTensor or slice): The dimension or mask used to select elements + m (int or ByteTensor or slice): the dimension or mask used to select elements """) add_docstr_all('index_add_', - """ + r""" index_add_(dim, index, tensor) -> Tensor -Accumulate the elements of tensor into the original tensor by adding to the -indices in the order given in index. The shape of tensor must exactly match the -elements indexed or an error will be raised. +Accumulate the elements of :attr:`tensor` into the :attr:`self` tensor by adding +to the indices in the order given in :attr:`index`. The shape of :attr:`tensor' +must exactly match the elements indexed or an error will be raised. Args: - dim (int): Dimension along which to index - index (LongTensor): Indices to select from tensor - tensor (Tensor): Tensor containing values to add + dim (int): dimension along which to index + index (LongTensor): indices of :attr:`tensor` to select from + tensor (Tensor): the tensor containing values to add Example: >>> x = torch.Tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) @@ -675,17 +678,17 @@ def add_docstr_all(method, docstr): """) add_docstr_all('index_copy_', - """ + r""" index_copy_(dim, index, tensor) -> Tensor -Copies the elements of tensor into the original tensor by selecting the -indices in the order given in index. The shape of tensor must exactly match the -elements indexed or an error will be raised. 
+Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting +the indices in the order given in :attr:`index`. The shape of :attr:`tensor` must +exactly match the elements indexed or an error will be raised. Args: - dim (int): Dimension along which to index - index (LongTensor): Indices to select from tensor - tensor (Tensor): Tensor containing values to copy + dim (int): dimension along which to index + index (LongTensor): indices of :attr:`tensor` to select from + tensor (Tensor): the tensor containing values to copy Example: >>> x = torch.Tensor(3, 3) @@ -700,16 +703,16 @@ def add_docstr_all(method, docstr): """) add_docstr_all('index_fill_', - """ + r""" index_fill_(dim, index, val) -> Tensor -Fills the elements of the original tensor with value :attr:`val` by selecting -the indices in the order given in index. +Fills the elements of the :attr:`self` tensor with value :attr:`val` by +selecting the indices in the order given in :attr:`index`. Args: - dim (int): Dimension along which to index - index (LongTensor): Indices - val (float): Value to fill + dim (int): dimension along which to index + index (LongTensor): indices of :attr:`self` tensor to fill in + val (float): the value to fill with Example: >>> x = torch.Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) @@ -723,28 +726,28 @@ def add_docstr_all(method, docstr): """) add_docstr_all('index_select', - """ + r""" index_select(dim, index) -> Tensor See :func:`torch.index_select` """) add_docstr_all('inverse', - """ + r""" inverse() -> Tensor See :func:`torch.inverse` """) add_docstr_all('is_contiguous', - """ + r""" is_contiguous() -> bool -Returns True if this tensor is contiguous in memory in C order. +Returns True if :attr:`self` tensor is contiguous in memory in C order. """) add_docstr_all('is_set_to', - """ + r""" is_set_to(tensor) -> bool Returns True if this object refers to the same ``THTensor`` object from the @@ -752,62 +755,62 @@ def add_docstr_all(method, docstr): """) add_docstr_all('kthvalue', - """ + r""" kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor) See :func:`torch.kthvalue` """) add_docstr_all('le', - """ + r""" le(other) -> Tensor See :func:`torch.le` """) add_docstr_all('le_', - """ + r""" le_(other) -> Tensor In-place version of :meth:`~Tensor.le` """) add_docstr_all('lerp', - """ + r""" lerp(start, end, weight) See :func:`torch.lerp` """) add_docstr_all('lerp_', - """ + r""" lerp_(start, end, weight) In-place version of :meth:`~Tensor.lerp` """) add_docstr_all('log', - """ + r""" log() -> Tensor See :func:`torch.log` """) add_docstr_all('log1p', - """ + r""" log1p() -> Tensor See :func:`torch.log1p` """) add_docstr_all('log1p_', - """ + r""" log1p_() -> Tensor In-place version of :meth:`~Tensor.log1p` """) -add_docstr_all('log_', """ +add_docstr_all('log_', r""" log_() -> Tensor In-place version of :meth:`~Tensor.log` @@ -816,37 +819,37 @@ def add_docstr_all(method, docstr): add_docstr_all('log_normal_', u""" log_normal_(mean=1, std=2, *, generator=None) -Fills this tensor with numbers samples from the log-normal distribution +Fills :attr:`self` tensor with numbers sampled from the log-normal distribution parameterized by the given mean (\u00B5) and standard deviation (\u03C3). Note that :attr:`mean` and :attr:`stdv` are the mean and standard deviation of the underlying normal distribution, and not of the returned distribution: ..
math:: - P(x) = \\dfrac{1}{x \\sigma \\sqrt{2\\pi}} e^{-\\dfrac{(\\ln x - \\mu)^2}{2\\sigma^2}} + P(x) = \\dfrac{1}{x \\sigma \\sqrt{2\\pi}}\ e^{-\\dfrac{(\\ln x - \\mu)^2}{2\\sigma^2}} """) add_docstr_all('lt', - """ + r""" lt(other) -> Tensor See :func:`torch.lt` """) add_docstr_all('lt_', - """ + r""" lt_(other) -> Tensor In-place version of :meth:`~Tensor.lt` """) add_docstr_all('map_', - """ + r""" map_(tensor, callable) -Applies :attr:`callable` for each element in this tensor and the given tensor -and stores the results in this tensor. This tensor and the given tensor must be -:ref:`broadcastable `. +Applies :attr:`callable` for each element in :attr:`self` tensor and the given +:attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and +the given :attr:`tensor` must be :ref:`broadcastable `. The :attr:`callable` should have the signature:: @@ -854,18 +857,18 @@ def callable(a, b) -> number """) add_docstr_all('masked_scatter_', - """ + r""" masked_scatter_(mask, source) -Copies elements from :attr:`source` into this tensor at positions where the -:attr:`mask` is one. +Copies elements from :attr:`source` into :attr:`self` tensor at positions where +the :attr:`mask` is one. The shape of :attr:`mask` must be :ref:`broadcastable ` with the shape of the underlying tensor. The :attr:`source` should have at least as many elements as the number of ones in :attr:`mask` Args: - mask (ByteTensor): The binary mask - source (Tensor): The tensor to copy from + mask (ByteTensor): the binary mask + source (Tensor): the tensor to copy from .. note:: @@ -874,107 +877,108 @@ def callable(a, b) -> number """) add_docstr_all('masked_fill_', - """ + r""" masked_fill_(mask, value) -Fills elements of this tensor with :attr:`value` where :attr:`mask` is one. -The shape of :attr:`mask` must be :ref:`broadcastable ` -with the shape of the underlying tensor. +Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is +one. The shape of :attr:`mask` must be +:ref:`broadcastable ` with the shape of the underlying +tensor. 
Args: - mask (ByteTensor): The binary mask - value (float): The value to fill + mask (ByteTensor): the binary mask + value (float): the value to fill in with """) add_docstr_all('masked_select', - """ + r""" masked_select(mask) -> Tensor See :func:`torch.masked_select` """) add_docstr_all('max', - """ + r""" max(dim=None, keepdim=False) -> float or (Tensor, Tensor) See :func:`torch.max` """) add_docstr_all('mean', - """ + r""" mean(dim=None, keepdim=False) -> float or (Tensor, Tensor) See :func:`torch.mean` """) add_docstr_all('median', - """ + r""" median(dim=None, keepdim=False) -> (Tensor, LongTensor) See :func:`torch.median` """) add_docstr_all('min', - """ + r""" min(dim=None, keepdim=False) -> float or (Tensor, Tensor) See :func:`torch.min` """) add_docstr_all('mm', - """ + r""" mm(mat2) -> Tensor See :func:`torch.mm` """) add_docstr_all('mode', - """ + r""" mode(dim=None, keepdim=False) -> (Tensor, LongTensor) See :func:`torch.mode` """) add_docstr_all('mul', - """ + r""" mul(value) -> Tensor See :func:`torch.mul` """) add_docstr_all('mul_', - """ + r""" mul_(value) In-place version of :meth:`~Tensor.mul` """) add_docstr_all('multinomial', - """ + r""" multinomial(num_samples, replacement=False, *, generator=None) See :func:`torch.multinomial` """) add_docstr_all('mv', - """ + r""" mv(vec) -> Tensor See :func:`torch.mv` """) add_docstr_all('narrow', - """ + r""" narrow(dimension, start, length) -> Tensor -Returns a new tensor that is a narrowed version of this tensor. The dimension -:attr:`dim` is narrowed from :attr:`start` to :attr:`start + length`. The -returned tensor and this tensor share the same underlying storage. +Returns a new tensor that is a narrowed version of :attr:`self` tensor. The +dimension :attr:`dim` is narrowed from :attr:`start` to :attr:`start + length`. The +returned tensor and :attr:`self` tensor share the same underlying storage. Args: - dimension (int): The dimension along which to narrow - start (int): The starting dimension - length (int): + dimension (int): the dimension along which to narrow + start (int): the starting dimension + length (int): the distance to the ending dimension Example: >>> x = torch.Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) @@ -990,155 +994,155 @@ def callable(a, b) -> number """) add_docstr_all('ndimension', - """ + r""" ndimension() -> int Alias for :meth:`~Tensor.dim()` """) add_docstr_all('ne', - """ + r""" ne(other) -> Tensor See :func:`torch.ne` """) add_docstr_all('ne_', - """ + r""" ne_(other) -> Tensor In-place version of :meth:`~Tensor.ne` """) add_docstr_all('neg', - """ + r""" neg() -> Tensor See :func:`torch.neg` """) add_docstr_all('neg_', - """ + r""" neg_() -> Tensor In-place version of :meth:`~Tensor.neg` """) add_docstr_all('nelement', - """ + r""" nelement() -> int Alias for :meth:`~Tensor.numel` """) add_docstr_all('nonzero', - """ + r""" nonzero() -> LongTensor See :func:`torch.nonzero` """) add_docstr_all('norm', - """ + r""" norm(p=2, dim=None, keepdim=False) -> float See :func:`torch.norm` """) add_docstr_all('normal_', - """ + r""" normal_(mean=0, std=1, *, generator=None) -Fills this tensor with elements samples from the normal distribution +Fills :attr:`self` tensor with elements sampled from the normal distribution parameterized by :attr:`mean` and :attr:`std`. """) add_docstr_all('numel', - """ + r""" numel() -> int See :func:`torch.numel` """) add_docstr_all('numpy', - """ + r""" numpy() -> ndarray -Returns this tensor as a NumPy :class:`ndarray`.
This tensor and the returned -:class:`ndarray` share the same underlying storage. Changes to this tensor will -be reflected in the :class:`ndarray` and vice versa. +Returns :attr:`self` tensor as a NumPy :class:`ndarray`. This tensor and the +returned :class:`ndarray` share the same underlying storage. Changes to +:attr:`self` tensor will be reflected in the :class:`ndarray` and vice versa. """) add_docstr_all('orgqr', - """ + r""" orgqr(input2) -> Tensor See :func:`torch.orgqr` """) add_docstr_all('ormqr', - """ + r""" ormqr(input2, input3, left=True, transpose=False) -> Tensor See :func:`torch.ormqr` """) add_docstr_all('potrf', - """ + r""" potrf(upper=True) -> Tensor See :func:`torch.potrf` """) add_docstr_all('potri', - """ + r""" potri(upper=True) -> Tensor See :func:`torch.potri` """) add_docstr_all('potrs', - """ + r""" potrs(input2, upper=True) -> Tensor See :func:`torch.potrs` """) add_docstr_all('pow', - """ + r""" pow(exponent) See :func:`torch.pow` """) add_docstr_all('pow_', - """ + r""" pow_(exponent) In-place version of :meth:`~Tensor.pow` """) add_docstr_all('prod', - """ + r""" prod(dim=None, keepdim=False) -> float See :func:`torch.prod` """) add_docstr_all('pstrf', - """ + r""" pstrf(upper=True, tol=-1) -> (Tensor, IntTensor) See :func:`torch.pstrf` """) add_docstr_all('put_', - """ + r""" put_(indices, tensor, accumulate=False) -> Tensor Copies the elements from :attr:`tensor` into the positions specified by -indices. For the puropose of indexing, the ``self`` tensor is treated as if it -were a 1D tensor. +indices. For the purpose of indexing, the :attr:`self` tensor is treated as if +it were a 1-D tensor. If :attr:`accumulate` is ``True``, the elements in :attr:`tensor` are added to :attr:`self`. If accumulate is ``False``, the behavior is undefined if indices @@ -1146,8 +1150,8 @@ def callable(a, b) -> number Args: indices (LongTensor): the indices into self - tensor (Tensor): Tensor containing values to copy - accumulate (bool): True to accumulate into self + tensor (Tensor): the tensor containing values to copy from + accumulate (bool): whether to accumulate into self Example:: @@ -1160,77 +1164,78 @@ def callable(a, b) -> number """) add_docstr_all('qr', - """ + r""" qr() -> (Tensor, Tensor) See :func:`torch.qr` """) add_docstr_all('random_', - """ + r""" random_(from=0, to=None, *, generator=None) -Fills this tensor with numbers sampled from the discrete uniform distribution -over [from, to - 1]. If not specified, the values are usually only bounded by -this tensor's data type. However, for floating point types, if unspecified, -range will be [0, 2^mantissa] to ensure that every value is representable. -For example, `torch.DoubleTensor(1).random_()` will be uniform in [0, 2^53]. +Fills :attr:`self` tensor with numbers sampled from the discrete uniform +distribution over ``[from, to - 1]``. If not specified, the values are usually +only bounded by :attr:`self` tensor's data type. However, for floating point +types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every +value is representable. For example, `torch.DoubleTensor(1).random_()` will be +uniform in ``[0, 2^53]``.
""") add_docstr_all('reciprocal', - """ + r""" reciprocal() -> Tensor See :func:`torch.reciprocal` """) add_docstr_all('reciprocal_', - """ + r""" reciprocal_() -> Tensor In-place version of :meth:`~Tensor.reciprocal` """) add_docstr_all('remainder', - """ + r""" remainder(divisor) -> Tensor See :func:`torch.remainder` """) add_docstr_all('remainder_', - """ + r""" remainder_(divisor) -> Tensor In-place version of :meth:`~Tensor.remainder` """) add_docstr_all('renorm', - """ + r""" renorm(p, dim, maxnorm) -> Tensor See :func:`torch.renorm` """) add_docstr_all('renorm_', - """ + r""" renorm_(p, dim, maxnorm) -> Tensor In-place version of :meth:`~Tensor.renorm` """) add_docstr_all('resize_', - """ + r""" resize_(*sizes) -Resizes this tensor to the specified size. If the number of elements is +Resizes :attr:`self` tensor to the specified size. If the number of elements is larger than the current storage size, then the underlying storage is resized to fit the new number of elements. If the number of elements is smaller, the underlying storage is not changed. Existing elements are preserved but any new memory is uninitialized. Args: - sizes (torch.Size or int...): The desired size + sizes (torch.Size or int...): the desired size Example: >>> x = torch.Tensor([[1, 2], [3, 4], [5, 6]]) @@ -1242,53 +1247,53 @@ def callable(a, b) -> number """) add_docstr_all('resize_as_', - """ + r""" resize_as_(tensor) -Resizes the current tensor to be the same size as the specified tensor. This is -equivalent to:: +Resizes the :attr:`self` tensor to be the same size as the specified +:attr:`tensor`. This is equivalent to:: self.resize_(tensor.size()) """) add_docstr_all('round', - """ + r""" round() -> Tensor See :func:`torch.round` """) add_docstr_all('round_', - """ + r""" round_() -> Tensor In-place version of :meth:`~Tensor.round` """) add_docstr_all('rsqrt', - """ + r""" rsqrt() -> Tensor See :func:`torch.rsqrt` """) add_docstr_all('rsqrt_', - """ + r""" rsqrt_() -> Tensor In-place version of :meth:`~Tensor.rsqrt` """) add_docstr_all('scatter_', - """ + r""" scatter_(dim, index, src) -> Tensor -Writes all values from the Tensor :attr:`src` into `self` at the indices -specified in the :attr:`index` Tensor. For each value in :attr:`src`, its output index -is specified by its index in :attr:`src` for dimension != :attr:`dim` and by the -corresponding value in :attr:`index` for dimension = :attr:`dim`. +Writes all values from the tensor :attr:`src` into :attr:`self` at the indices +specified in the :attr:`index` tensor. For each value in :attr:`src`, its output +index is specified by its index in :attr:`src` for dimension != :attr:`dim` and +by the corresponding value in :attr:`index` for dimension = :attr:`dim`. -For a 3-D tensor, `self` is updated as:: +For a 3-D tensor, :attr:`self` is updated as:: self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0 self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1 @@ -1306,10 +1311,10 @@ def callable(a, b) -> number the specified dimension :attr:`dim` must be unique. 
Args: - input (Tensor): The source tensor - dim (int): The axis along which to index - index (LongTensor): The indices of elements to scatter - src (Tensor or float): The source element(s) to scatter + input (Tensor): the source tensor + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter + src (Tensor or float): the source element(s) to scatter Example:: @@ -1337,16 +1342,16 @@ def callable(a, b) -> number """) add_docstr_all('select', - """ + r""" select(dim, index) -> Tensor or number -Slices the tensor along the selected dimension at the given index. If this -tensor is one dimensional, this function returns a number. Otherwise, it -returns a tensor with the given dimension removed. +Slices the :attr:`self` tensor along the selected dimension at the given index. +If :attr:`self` is one dimensional, this function returns a number. Otherwise, +it returns a tensor with the given dimension removed. Args: - dim (int): Dimension to slice - index (int): Index to select + dim (int): the dimension to slice + index (int): the index to select with .. note:: @@ -1356,85 +1361,85 @@ def callable(a, b) -> number """) add_docstr_all('set_', - """ + r""" set_(source=None, storage_offset=0, size=None, stride=None) Sets the underlying storage, size, and strides. If :attr:`source` is a tensor, -this tensor will share the same storage and have the same size and strides -as the given tensor. Changes to elements in one tensor will be reflected in the -other. +:attr:`self` tensor will share the same storage and have the same size and +strides as :attr:`source`. Changes to elements in one tensor will be reflected +in the other. If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying storage, offset, size, and stride. Args: - source (Tensor or Storage): The tensor or storage to use - storage_offset (int): The offset in the storage - size (torch.Size): The desired size. Defaults to the size of the source. - stride (tuple): The desired stride. Defaults to C-contiguous strides. + source (Tensor or Storage): the tensor or storage to use + storage_offset (int): the offset in the storage + size (torch.Size): the desired size. Defaults to the size of the source. + stride (tuple): the desired stride. Defaults to C-contiguous strides. """) add_docstr_all('sigmoid', - """ + r""" sigmoid() -> Tensor See :func:`torch.sigmoid` """) add_docstr_all('sigmoid_', - """ + r""" sigmoid_() -> Tensor In-place version of :meth:`~Tensor.sigmoid` """) add_docstr_all('sign', - """ + r""" sign() -> Tensor See :func:`torch.sign` """) add_docstr_all('sign_', - """ + r""" sign_() -> Tensor In-place version of :meth:`~Tensor.sign` """) add_docstr_all('sin', - """ + r""" sin() -> Tensor See :func:`torch.sin` """) add_docstr_all('sin_', - """ + r""" sin_() -> Tensor In-place version of :meth:`~Tensor.sin` """) add_docstr_all('sinh', - """ + r""" sinh() -> Tensor See :func:`torch.sinh` """) add_docstr_all('sinh_', - """ + r""" sinh_() -> Tensor In-place version of :meth:`~Tensor.sinh` """) add_docstr_all('size', - """ + r""" size() -> torch.Size -Returns the size of the tensor. The returned value is a subclass of +Returns the size of the :attr:`self` tensor. The returned value is a subclass of :class:`tuple`. 
Example: @@ -1443,60 +1448,60 @@ def callable(a, b) -> number """) add_docstr_all('sort', - """ + r""" sort(dim=None, descending=False) -> (Tensor, LongTensor) See :func:`torch.sort` """) add_docstr_all('sqrt', - """ + r""" sqrt() -> Tensor See :func:`torch.sqrt` """) add_docstr_all('sqrt_', - """ + r""" sqrt_() -> Tensor In-place version of :meth:`~Tensor.sqrt` """) add_docstr_all('squeeze', - """ + r""" squeeze(dim=None) See :func:`torch.squeeze` """) add_docstr_all('squeeze_', - """ + r""" squeeze_(dim=None) In-place version of :meth:`~Tensor.squeeze` """) add_docstr_all('std', - """ + r""" std(dim=None, unbiased=True, keepdim=False) -> float See :func:`torch.std` """) add_docstr_all('storage', - """ + r""" storage() -> torch.Storage Returns the underlying storage """) add_docstr_all('storage_offset', - """ + r""" storage_offset() -> int -Returns this tensor's offset in the underlying storage in terms of number of -storage elements (not bytes). +Returns :attr:`self` tensor's offset in the underlying storage in terms of +number of storage elements (not bytes). Example: >>> x = torch.Tensor([1, 2, 3, 4, 5]) @@ -1507,16 +1512,18 @@ def callable(a, b) -> number """) add_docstr_all('stride', - """ + r""" stride(dim) -> tuple or int -Returns the stride of the tensor. -Stride is the jump necessary to go from one element to the next one in the specified dimension dim. -Tuple is returned when no Argument is passed. So we get stride in all dimensions. -Integer value is returned when we desire stride in particular dimension. +Returns the stride of :attr:`self` tensor. + +Stride is the jump necessary to go from one element to the next one in the +specified dimension :attr:`dim`. A tuple of all strides is returned when no +argument is passed in. Otherwise, an integer value is returned as the stride in +the particular dimension :attr:`dim`. Args: - dim (int): The desired dimension in which stride is required. + dim (int, optional): the desired dimension in which stride is required Example: >>> x = torch.Tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]) @@ -1529,10 +1536,10 @@ def callable(a, b) -> number """) add_docstr_all('sub', - """ + r""" sub(value, other) -> Tensor -Subtracts a scalar or tensor from this tensor. If both :attr:`value` and +Subtracts a scalar or tensor from :attr:`self` tensor. If both :attr:`value` and :attr:`other` are specified, each element of :attr:`other` is scaled by :attr:`value` before being used. 
@@ -1543,169 +1550,170 @@ def callable(a, b) -> number """) add_docstr_all('sub_', - """ + r""" sub_(x) -> Tensor In-place version of :meth:`~Tensor.sub` """) add_docstr_all('sum', - """ + r""" sum(dim=None, keepdim=False) -> float See :func:`torch.sum` """) add_docstr_all('svd', - """ + r""" svd(some=True) -> (Tensor, Tensor, Tensor) See :func:`torch.svd` """) add_docstr_all('symeig', - """ + r""" symeig(eigenvectors=False, upper=True) -> (Tensor, Tensor) See :func:`torch.symeig` """) add_docstr_all('t', - """ + r""" t() -> Tensor See :func:`torch.t` """) add_docstr_all('t_', - """ + r""" t_() -> Tensor In-place version of :meth:`~Tensor.t` """) add_docstr_all('take', - """ + r""" take(indices) -> Tensor See :func:`torch.take` """) add_docstr_all('tan_', - """ + r""" tan_() -> Tensor In-place version of :meth:`~Tensor.tan` """) add_docstr_all('tanh', - """ + r""" tanh() -> Tensor See :func:`torch.tanh` """) add_docstr_all('tanh_', - """ + r""" tanh_() -> Tensor In-place version of :meth:`~Tensor.tanh` """) add_docstr_all('topk', - """ + r""" topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor) See :func:`torch.topk` """) add_docstr_all('trace', - """ + r""" trace() -> float See :func:`torch.trace` """) add_docstr_all('transpose', - """ + r""" transpose(dim0, dim1) -> Tensor See :func:`torch.transpose` """) add_docstr_all('transpose_', - """ + r""" transpose_(dim0, dim1) -> Tensor In-place version of :meth:`~Tensor.transpose` """) add_docstr_all('tril', - """ + r""" tril(k=0) -> Tensor See :func:`torch.tril` """) add_docstr_all('tril_', - """ + r""" tril_(k=0) -> Tensor In-place version of :meth:`~Tensor.tril` """) add_docstr_all('triu', - """ + r""" triu(k=0) -> Tensor See :func:`torch.triu` """) add_docstr_all('triu_', - """ + r""" triu_(k=0) -> Tensor In-place version of :meth:`~Tensor.triu` """) add_docstr_all('trtrs', - """ + r""" trtrs(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor) See :func:`torch.trtrs` """) add_docstr_all('trunc', - """ + r""" trunc() -> Tensor See :func:`torch.trunc` """) add_docstr_all('trunc_', - """ + r""" trunc_() -> Tensor In-place version of :meth:`~Tensor.trunc` """) add_docstr_all('unfold', - """ + r""" unfold(dim, size, step) -> Tensor -Returns a tensor which contains all slices of size :attr:`size` in -the dimension :attr:`dim`. +Returns a tensor which contains all slices of size :attr:`size` from +:attr:`self` tensor in the dimension :attr:`dim`. Step between two slices is given by :attr:`step`. -If `sizedim` is the original size of dimension dim, the size of dimension `dim` -in the returned tensor will be `(sizedim - size) / step + 1` +If `sizedim` is the size of dimension dim for :attr:`self`, the size of +dimension :attr:`dim` in the returned tensor will be +`(sizedim - size) / step + 1`. An additional dimension of size size is appended in the returned tensor. Args: dim (int): dimension in which unfolding happens - size (int): size of each slice that is unfolded + size (int): the size of each slice that is unfolded step (int): the step between each slice Example:: @@ -1742,10 +1750,10 @@ def callable(a, b) -> number """) add_docstr_all('uniform_', - """ + r""" uniform_(from=0, to=1) -> Tensor -Fills this tensor with numbers sampled from the uniform distribution: +Fills :attr:`self` tensor with numbers sampled from the uniform distribution: .. 
math: @@ -1753,38 +1761,39 @@ def callable(a, b) -> number """) add_docstr_all('unsqueeze', - """ + r""" unsqueeze(dim) See :func:`torch.unsqueeze` """) add_docstr_all('unsqueeze_', - """ + r""" unsqueeze_(dim) In-place version of :meth:`~Tensor.unsqueeze` """) add_docstr_all('var', - """ + r""" var(dim=None, unbiased=True, keepdim=False) -> float See :func:`torch.var` """) add_docstr_all('view', - """ + r""" view(*args) -> Tensor -Returns a new tensor with the same data but different size. +Returns a new tensor with the same data as the :attr:`self` tensor but of a +different size. The returned tensor shares the same data and must have the same number of elements, but may have a different size. A tensor must be :func:`contiguous` to be viewed. Args: - args (torch.Size or int...): Desired size + args (torch.Size or int...): the desired size Example: >>> x = torch.randn(4, 4) @@ -1799,18 +1808,18 @@ def callable(a, b) -> number """) add_docstr_all('expand', - """ + r""" expand(*sizes) -> Tensor -Returns a new view of the tensor with singleton dimensions expanded +Returns a new view of the :attr:`self` tensor with singleton dimensions expanded to a larger size. Passing -1 as the size for a dimension means not changing the size of that dimension. Tensor can be also expanded to a larger number of dimensions, and the -new ones will be appended at the front. (For the new dimensions, the -size cannot be set to -1.) +new ones will be appended at the front. For the new dimensions, the +size cannot be set to -1. Expanding a tensor does not allocate new memory, but only creates a new view on the existing tensor where a dimension of size one is @@ -1819,7 +1828,7 @@ def callable(a, b) -> number memory. Args: - *sizes (torch.Size or int...): The desired expanded size + *sizes (torch.Size or int...): the desired expanded size Example: >>> x = torch.Tensor([[1], [2], [3]]) @@ -1838,8 +1847,8 @@ def callable(a, b) -> number """) add_docstr_all('zero_', - """ -zero_() + r""" +zero_() -> Tensor -Fills this tensor with zeros. +Fills :attr:`self` tensor with zeros. """) diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py index e734834f0ee8..2e0bc2cd23ef 100644 --- a/torch/_torch_docs.py +++ b/torch/_torch_docs.py @@ -4,9 +4,9 @@ from torch._C import _add_docstr as add_docstr add_docstr(torch._C.abs, - """abs(input, out=None) -> Tensor + r"""abs(input, out=None) -> Tensor -Computes the element-wise absolute value of the given :attr:`input` a tensor. +Computes the element-wise absolute value of the given :attr:`input` tensor. Example:: @@ -15,14 +15,14 @@ """) add_docstr(torch._C.acos, - """ + r""" acos(input, out=None) -> Tensor -Returns a new `Tensor` with the arccosine of the elements of :attr:`input`. +Returns a new tensor with the arccosine of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -44,21 +44,22 @@ """) add_docstr(torch._C.add, - """ + r""" .. function:: add(input, value, out=None) Adds the scalar :attr:`value` to each element of the input :attr:`input` and returns a new resulting tensor. -:math:`out = tensor + value` +.. math:: + out = input + value If :attr:`input` is of type FloatTensor or DoubleTensor, :attr:`value` must be a real number, otherwise it should be an integer. 
Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor value (Number): the number to be added to each element of :attr:`input` - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -82,23 +83,24 @@ .. function:: add(input, value=1, other, out=None) -Each element of the Tensor :attr:`other` is multiplied by the scalar -:attr:`value` and added to each element of the Tensor :attr:`input`. -The resulting Tensor is returned. +Each element of the tensor :attr:`other` is multiplied by the scalar +:attr:`value` and added to each element of the tensor :attr:`input`. +The resulting tensor is returned. The shapes of :attr:`input` and :attr:`other` must be :ref:`broadcastable `. -:math:`out = input + (other * value)` +.. math:: + out = input + value \times other If :attr:`other` is of type FloatTensor or DoubleTensor, :attr:`value` must be a real number, otherwise it should be an integer. Args: - input (Tensor): the first input `Tensor` + input (Tensor): the first input tensor value (Number): the scalar multiplier for :attr:`other` - other (Tensor): the second input `Tensor` - out (Tensor, optional): The result `Tensor` + other (Tensor): the second input tensor + out (Tensor, optional): the output tensor Example:: @@ -130,7 +132,7 @@ """) add_docstr(torch._C.addbmm, - """ + r""" addbmm(beta=1, mat, alpha=1, batch1, batch2, out=None) -> Tensor Performs a batch matrix-matrix product of matrices stored @@ -139,15 +141,16 @@ along the first dimension). :attr:`mat` is added to the final result. -:attr:`batch1` and :attr:`batch2` must be 3D Tensors each containing the +:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same number of matrices. -If :attr:`batch1` is a `b x n x m` Tensor, :attr:`batch2` is a `b x m x p` -Tensor, ::attr:`mat` must be :ref:`broadcastable ` -with a `n x p` Tensor and attr:`out` will be a `n x p` Tensor. +If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a +:math:`(b \times m \times p)` tensor, :attr:`mat` must be +:ref:`broadcastable ` with a :math:`(n \times p)` tensor +and :attr:`out` will be a :math:`(n \times p)` tensor. -In other words, -:math:`res = (beta * M) + (alpha * sum(batch1_i @ batch2_i, i = 0, b))` +.. math:: + out = \beta\ mat + \alpha\ (\sum_{i=0}^{b-1} batch1_i \mathbin{@} batch2_i) For inputs of type `FloatTensor` or `DoubleTensor`, args `beta` and `alpha` must be real numbers, otherwise they should be integers. @@ -156,9 +159,9 @@ beta (Number, optional): multiplier for :attr:`mat` mat (Tensor): matrix to be added alpha (Number, optional): multiplier for `batch1 @ batch2` - batch1 (Tensor): First batch of matrices to be multiplied - batch2 (Tensor): Second batch of matrices to be multiplied - out (Tensor, optional): Output tensor + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + out (Tensor, optional): the output tensor Example:: @@ -174,12 +177,15 @@ """) add_docstr(torch._C.addcdiv, - """ + r""" addcdiv(tensor, value=1, tensor1, tensor2, out=None) -> Tensor Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`, multiply the result by the scalar :attr:`value` and add it to :attr:`tensor`. +.. math:: + out_i = tensor_i + value \times \frac{tensor1_i}{tensor2_i} + The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be :ref:`broadcastable `.
@@ -188,10 +194,10 @@ Args: tensor (Tensor): the tensor to be added - value (Number, optional): multiplier for `tensor1 ./ tensor2` - tensor1 (Tensor): Numerator tensor - tensor2 (Tensor): Denominator tensor - out (Tensor, optional): Output tensor + value (Number, optional): multiplier for :math:`tensor1 ./ tensor2` + tensor1 (Tensor): the numerator tensor + tensor2 (Tensor): the denominator tensor + out (Tensor, optional): the output tensor Example:: @@ -206,13 +212,16 @@ """) add_docstr(torch._C.addcmul, - """ + r""" addcmul(tensor, value=1, tensor1, tensor2, out=None) -> Tensor Performs the element-wise multiplication of :attr:`tensor1` by :attr:`tensor2`, multiply the result by the scalar :attr:`value` and add it to :attr:`tensor`. +.. math:: + out_i = tensor_i + value \times tensor1_i \times tensor2_i + The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be :ref:`broadcastable `. @@ -221,10 +230,10 @@ Args: tensor (Tensor): the tensor to be added - value (Number, optional): multiplier for `tensor1 .* tensor2` - tensor1 (Tensor): tensor to be multiplied - tensor2 (Tensor): tensor to be multiplied - out (Tensor, optional): Output tensor + value (Number, optional): multiplier for :math:`tensor1 .* tensor2` + tensor1 (Tensor): the tensor to be multiplied + tensor2 (Tensor): the tensor to be multiplied + out (Tensor, optional): the output tensor Example:: @@ -239,20 +248,21 @@ """) add_docstr(torch._C.addmm, - """ + r""" addmm(beta=1, mat, alpha=1, mat1, mat2, out=None) -> Tensor Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. The matrix :attr:`mat` is added to the final result. -If :attr:`mat1` is a `n x m` Tensor, :attr:`mat2` is a `m x p` Tensor, -then :attr:`mat` must be :ref:`broadcastable ` with -a `n x p` Tensor and :attr:`out` will be a `n x p` Tensor. +If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a +:math:`(m \times p)` tensor, then :attr:`mat` must be +:ref:`broadcastable ` with a :math:`(n \times p)` tensor +and :attr:`out` will be a :math:`(n \times p)` tensor. `alpha` and `beta` are scaling factors on `mat1 @ mat2` and `mat` respectively. -In other words, -:math:`out = (beta * M) + (alpha * mat1 @ mat2)` +.. math:: + out = \beta\ mat + \alpha\ (mat1_i \mathbin{@} mat2_i) For inputs of type `FloatTensor` or `DoubleTensor`, args :attr:`beta` and :attr:`alpha` must be real numbers, otherwise they should be integers. @@ -260,10 +270,10 @@ Args: beta (Number, optional): multiplier for :attr:`mat` mat (Tensor): matrix to be added - alpha (Number, optional): multiplier for `mat1 @ mat2` - mat1 (Tensor): First matrix to be multiplied - mat2 (Tensor): Second matrix to be multiplied - out (Tensor, optional): Output tensor + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` + mat1 (Tensor): the first matrix to be multiplied + mat2 (Tensor): the second matrix to be multiplied + out (Tensor, optional): the output tensor Example:: @@ -278,22 +288,22 @@ """) add_docstr(torch._C.addmv, - """ + r""" addmv(beta=1, tensor, alpha=1, mat, vec, out=None) -> Tensor Performs a matrix-vector product of the matrix :attr:`mat` and the vector :attr:`vec`. The vector :attr:`tensor` is added to the final result. -If :attr:`mat` is a `n x m` Tensor, :attr:`vec` is a 1D Tensor of size `m`, -then :attr:`tensor` must be :ref:`broadcastable ` -with a 1D tensor of size `n` and :attr:`out` will be 1D tensor of size `n`. 
+If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of +size `m`, then :attr:`tensor` must be +:ref:`broadcastable ` with a 1-D tensor of size `n` and +:attr:`out` will be 1-D tensor of size `n`. `alpha` and `beta` are scaling factors on `mat * vec` and `tensor` respectively. -In other words: - -:math:`out = (beta * tensor) + (alpha * (mat @ vec2))` +.. math:: + out = \beta\ tensor + \alpha\ (mat \mathbin{@} vec) For inputs of type `FloatTensor` or `DoubleTensor`, args :attr:`beta` and :attr:`alpha` must be real numbers, otherwise they should be integers @@ -301,10 +311,10 @@ Args: beta (Number, optional): multiplier for :attr:`tensor` tensor (Tensor): vector to be added - alpha (Number, optional): multiplier for `mat @ vec` + alpha (Number, optional): multiplier for :math:`mat @ vec` mat (Tensor): matrix to be multiplied vec (Tensor): vector to be multiplied - out (Tensor, optional): Output tensor + out (Tensor, optional): the output tensor Example:: @@ -326,27 +336,27 @@ and adds it to the matrix :attr:`mat`. Optional values :attr:`beta` and :attr:`alpha` are scalars that multiply -:attr:`mat` and :math:`(vec1 \otimes vec2)` respectively +:attr:`mat` and :math:`(vec1 \otimes vec2)` respectively. -In other words, -:math:`out = (beta * mat) + (alpha * vec1 \otimes vec2)` +.. math:: + out = \beta\ mat + \alpha\ (vec1 \otimes vec2) If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector of size `m`, then :attr:`mat` must be -:ref:`broadcastable ` with a matrix of size `n x m` -and :attr:`out` will be a matrix of size `n x m`. +:ref:`broadcastable ` with a matrix of size +:math:`(n \times m)` and :attr:`out` will be a matrix of size +:math:`(n \times m)`. For inputs of type `FloatTensor` or `DoubleTensor`, args :attr:`beta` and :attr:`alpha` must be real numbers, otherwise they should be integers Args: - beta (Number, optional): Multiplier for :attr:`mat` - mat (Tensor): Matrix to be added - alpha (Number, optional): Multiplier for outer product of - for :attr:`vec1` and :attr:`vec2` - vec1 (Tensor): First vector of the outer product - vec2 (Tensor): Second vector of the outer product - out (Tensor, optional): Output tensor + beta (Number, optional): multiplier for :attr:`mat` + mat (Tensor): matrix to be added + alpha (Number, optional): multiplier for :math:`vec1 \otimes vec2` + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + out (Tensor, optional): the output tensor Example:: @@ -361,14 +371,14 @@ """) add_docstr(torch._C.asin, - """ + r""" asin(input, out=None) -> Tensor -Returns a new `Tensor` with the arcsine of the elements of :attr:`input`. +Returns a new tensor with the arcsine of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -389,14 +399,14 @@ """) add_docstr(torch._C.atan, - """ + r""" atan(input, out=None) -> Tensor -Returns a new `Tensor` with the arctangent of the elements of :attr:`input`. +Returns a new tensor with the arctangent of the elements of :attr:`input`. 
Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -417,19 +427,19 @@ """) add_docstr(torch._C.atan2, - """ + r""" atan2(input1, input2, out=None) -> Tensor -Returns a new `Tensor` with the arctangent of the elements of :attr:`input1` +Returns a new tensor with the arctangent of the elements of :attr:`input1` and :attr:`input2`. The shapes of :attr:`input1` and :attr:`input2` must be :ref:`broadcastable `. Args: - input1 (Tensor): the first input `Tensor` - input2 (Tensor): the second input `Tensor` - out (Tensor, optional): The result `Tensor` + input1 (Tensor): the first input tensor + input2 (Tensor): the second input tensor + out (Tensor, optional): the output tensor Example:: @@ -457,26 +467,28 @@ and :attr:`batch2`. :attr:`mat` is added to the final result. -:attr:`batch1` and :attr:`batch2` must be 3D Tensors each containing the same +:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same number of matrices. -If :attr:`batch1` is a `b x n x m` Tensor, :attr:`batch2` is a `b x m x p` -Tensor, then :attr:`mat` must be :ref:`broadcastable ` -with a `b x n x p` Tensor and :attr:`out` will be a `b x n x p` Tensor. +If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a +:math:`(b \times m \times p)` tensor, then :attr:`mat` must be +:ref:`broadcastable ` with a +:math:`(b \times n \times p)` tensor and :attr:`out` will be a +:math:`(b \times n \times p)` tensor. -In other words, -:math:`res_i = (beta * M_i) + (alpha * batch1_i \times batch2_i)` +.. math:: + out_i = \beta\ mat_i + \alpha\ (batch1_i \mathbin{@} batch2_i) For inputs of type `FloatTensor` or `DoubleTensor`, args :attr:`beta` and :attr:`alpha` must be real numbers, otherwise they should be integers. Args: beta (Number, optional): multiplier for :attr:`mat` - mat (Tensor): tensor to be added + mat (Tensor): the tensor to be added alpha (Number, optional): multiplier for `batch1 @ batch2` - batch1 (Tensor): First batch of matrices to be multiplied - batch2 (Tensor): Second batch of matrices to be multiplied - out (Tensor, optional): Output tensor + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + out (Tensor, optional): the output tensor Example:: @@ -488,25 +500,25 @@ """) add_docstr(torch._C.bernoulli, - """ + r""" bernoulli(input, out=None) -> Tensor -Draws binary random numbers (0 or 1) from a bernoulli distribution. +Draws binary random numbers (0 or 1) from a Bernoulli distribution. -The :attr:`input` Tensor should be a tensor containing probabilities +The :attr:`input` tensor should be a tensor containing probabilities to be used for drawing the binary random number. Hence, all values in :attr:`input` have to be in the range: -:math:`0 <= input_i <= 1` +:math:`0 \leq input_i \leq 1` The `i-th` element of the output tensor will draw a value `1` according to the `i-th` probability value given in :attr:`input`. 
-The returned :attr:`out` Tensor only has values 0 or 1 and is of the same +The returned :attr:`out` tensor only has values 0 or 1 and is of the same shape as :attr:`input` Args: - input (Tensor): Probability values for the bernoulli distribution - out (Tensor, optional): Output tensor + input (Tensor): the input tensor of probability values for the Bernoulli distribution + out (Tensor, optional): the output tensor Example:: @@ -544,25 +556,29 @@ """) add_docstr(torch._C.bmm, - """ + r""" bmm(batch1, batch2, out=None) -> Tensor Performs a batch matrix-matrix product of matrices stored in :attr:`batch1` and :attr:`batch2`. -:attr:`batch1` and :attr:`batch2` must be 3D Tensors each containing +:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same number of matrices. -If :attr:`batch1` is a `b x n x m` Tensor, :attr:`batch2` is a `b x m x p` -Tensor, :attr:`out` will be a `b x n x p` Tensor. +If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a +:math:`(b \times m \times p)` tensor, :attr:`out` will be a +:math:`(b \times n \times p)` tensor. + +.. math:: + out_i = batch1_i \mathbin{@} batch2_i .. note:: This function does not :ref:`broadcast `. For broadcasting matrix products, see :func:`torch.matmul`. Args: - batch1 (Tensor): First batch of matrices to be multiplied - batch2 (Tensor): Second batch of matrices to be multiplied - out (Tensor, optional): Output tensor + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + out (Tensor, optional): the output tensor Example:: @@ -574,10 +590,10 @@ """) add_docstr(torch._C.cat, - """ + r""" cat(seq, dim=0, out=None) -> Tensor -Concatenates the given sequence of :attr:`seq` Tensors in the given dimension. +Concatenates the given sequence of :attr:`seq` tensors in the given dimension. :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split` and :func:`torch.chunk` @@ -585,10 +601,9 @@ :func:`cat` can be best understood via examples. Args: - seq (sequence of Tensors): Can be any python sequence of `Tensor` - of the same type. - dim (int, optional): The dimension over which the tensors are concatenated - out (Tensor, optional): Output argument + seq (sequence of tensors): any python sequence of tensors of the same type + dim (int, optional): the dimension over which the tensors are concatenated + out (Tensor, optional): the output tensor Example:: @@ -618,15 +633,15 @@ """) add_docstr(torch._C.ceil, - """ + r""" ceil(input, out=None) -> Tensor -Returns a new `Tensor` with the ceil of the elements of :attr:`input`, +Returns a new tensor with the ceil of the elements of :attr:`input`, the smallest integer greater than or equal to each element. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -650,15 +665,15 @@ """) add_docstr(torch._C.reciprocal, - """ + r""" reciprocal(input, out=None) -> Tensor -Returns a new `Tensor` with the reciprocal of the elements of :attr:`input`, -i.e. :math:`1.0 / x` +Returns a new tensor with the reciprocal of the elements of :attr:`input`, +i.e. :math:`x^{-1} = \frac{1}{x}`. 
Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -682,26 +697,27 @@ """) add_docstr(torch._C.clamp, - """ + r""" clamp(input, min, max, out=None) -> Tensor Clamp all elements in :attr:`input` into the range `[min, max]` and return -a resulting Tensor. - -:: +a resulting tensor: - | min, if x_i < min - y_i = | x_i, if min <= x_i <= max - | max, if x_i > max +.. math:: + y_i = \begin{cases} + min & \text{if } x_i < min \\ + x_i & \text{if } min \leq x_i \leq max \\ + max & \text{if } x_i > max + \end{cases} If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, args :attr:`min` -and :attr:`max` must be real numbers, otherwise they should be integers +and :attr:`max` must be real numbers, otherwise they should be integers. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor min (Number): lower-bound of the range to be clamped to max (Number): upper-bound of the range to be clamped to - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -727,12 +743,12 @@ Clamps all elements in :attr:`input` to be larger or equal :attr:`min`. If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`value` -should be a real number, otherwise it should be an integer +should be a real number, otherwise it should be an integer. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor value (Number): minimal value of each element in the output - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -758,12 +774,12 @@ Clamps all elements in :attr:`input` to be smaller or equal :attr:`max`. If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`value` -should be a real number, otherwise it should be an integer +should be a real number, otherwise it should be an integer. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor value (Number): maximal value of each element in the output - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -787,14 +803,14 @@ """) add_docstr(torch._C.cos, - """ + r""" cos(input, out=None) -> Tensor -Returns a new `Tensor` with the cosine of the elements of :attr:`input`. +Returns a new tensor with the cosine of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -815,15 +831,15 @@ """) add_docstr(torch._C.cosh, - """ + r""" cosh(input, out=None) -> Tensor -Returns a new `Tensor` with the hyperbolic cosine of the elements of +Returns a new tensor with the hyperbolic cosine of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -844,7 +860,7 @@ """) add_docstr(torch._C.cross, - """ + r""" cross(input, other, dim=-1, out=None) -> Tensor @@ -858,10 +874,10 @@ size 3. Args: - input (Tensor): the input `Tensor` - other (Tensor): the second input `Tensor` + input (Tensor): the input tensor + other (Tensor): the second input tensor dim (int, optional): the dimension to take the cross-product in. 
- out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -901,20 +917,22 @@ """) add_docstr(torch._C.cumprod, - """ + r""" cumprod(input, dim, out=None) -> Tensor Returns the cumulative product of elements of :attr:`input` in the dimension :attr:`dim`. For example, if :attr:`input` is a vector of size N, the result will also be -a vector of size N, with elements: -:math:`y_i = x_1 * x_2 * x_3 * ... * x_i` +a vector of size N, with elements. + +.. math:: + y_i = x_1 \times x_2\times x_3\times \dots \times x_i Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int): the dimension to do the operation over - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -965,20 +983,22 @@ """) add_docstr(torch._C.cumsum, - """ + r""" cumsum(input, dim, out=None) -> Tensor Returns the cumulative sum of elements of :attr:`input` in the dimension :attr:`dim`. For example, if :attr:`input` is a vector of size N, the result will also be -a vector of size N, with elements: -:math:`y_i = x_1 + x_2 + x_3 + ... + x_i` +a vector of size N, with elements. + +.. math:: + y_i = x_1 + x_2 + x_3 + \dots + x_i Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int): the dimension to do the operation over - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -1015,24 +1035,24 @@ """) add_docstr(torch._C.diag, - """ + r""" diag(input, diagonal=0, out=None) -> Tensor -- If :attr:`input` is a vector (1D Tensor), then returns a 2D square Tensor +- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor with the elements of :attr:`input` as the diagonal. -- If :attr:`input` is a matrix (2D Tensor), then returns a 1D Tensor with +- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with the diagonal elements of :attr:`input`. -The argument :attr:`diagonal` controls which diagonal to consider. +The argument :attr:`diagonal` controls which diagonal to consider: -- :attr:`diagonal` = 0, is the main diagonal. -- :attr:`diagonal` > 0, is above the main diagonal. -- :attr:`diagonal` < 0, is below the main diagonal. +- If :attr:`diagonal` = 0, it is the main diagonal. +- If :attr:`diagonal` > 0, it is above the main diagonal. +- If :attr:`diagonal` < 0, it is below the main diagonal. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor diagonal (int, optional): the diagonal to consider - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example: @@ -1088,7 +1108,7 @@ """) add_docstr(torch._C.dist, - """ + r""" dist(input, other, p=2) -> float Returns the p-norm of (:attr:`input` - :attr:`other`) @@ -1097,9 +1117,9 @@ :ref:`broadcastable `. Args: - input (Tensor): the input `Tensor` - other (Tensor): the Right-hand-side input `Tensor` - p (float, optional): The norm to be computed. + input (Tensor): the input tensor + other (Tensor): the Right-hand-side input tensor + p (float, optional): the norm to be computed Example:: @@ -1134,21 +1154,22 @@ """) add_docstr(torch._C.div, - """ + r""" .. function:: div(input, value, out=None) Divides each element of the input :attr:`input` with the scalar :attr:`value` and returns a new resulting tensor. -:math:`out = tensor / value` +.. 
math:: + out_i = \frac{input_i}{value} If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`value` should be a real number, otherwise it should be an integer Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor value (Number): the number to be divided to each element of :attr:`input` - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -1174,17 +1195,18 @@ .. function:: div(input, other, out=None) -Each element of the Tensor :attr:`input` is divided by each element -of the Tensor :attr:`other`. The resulting Tensor is returned. The shapes of +Each element of the tensor :attr:`input` is divided by each element +of the tensor :attr:`other`. The resulting tensor is returned. The shapes of :attr:`input` and :attr:`other` must be :ref:`broadcastable `. -:math:`out_i = input_i / other_i` +.. math:: + out_i = \frac{input_i}{other_i} Args: - input (Tensor): the numerator `Tensor` - other (Tensor): the denominator `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the numerator tensor + other (Tensor): the denominator tensor + out (Tensor, optional): the output tensor Example:: @@ -1222,7 +1244,7 @@ """) add_docstr(torch._C.dot, - """ + r""" dot(tensor1, tensor2) -> float Computes the dot product (inner product) of two tensors. @@ -1236,28 +1258,25 @@ """) add_docstr(torch._C.eig, - """ + r""" eig(a, eigenvectors=False, out=None) -> (Tensor, Tensor) Computes the eigenvalues and eigenvectors of a real square matrix. Args: - a (Tensor): A square matrix for which the eigenvalues and eigenvectors will - be computed - eigenvectors (bool): ``True`` to compute both eigenvalues and eigenvectors. - Otherwise, only eigenvalues will be computed. - out (tuple, optional): Output tensors + a (Tensor): the square matrix for which the eigenvalues and eigenvectors will be computed + eigenvectors (bool): ``True`` to compute both eigenvalues and eigenvectors; otherwise, only eigenvalues will be computed + out (tuple, optional): the output tensors Returns: - (Tensor, Tensor): tuple containing + (Tensor, Tensor): A tuple containing - **e** (*Tensor*): the right eigenvalues of ``a`` - - **v** (*Tensor*): the eigenvectors of ``a`` if ``eigenvectors`` - is ``True``; otherwise an empty tensor + - **v** (*Tensor*): the eigenvectors of ``a`` if ``eigenvectors`` is ``True``; otherwise an empty tensor """) add_docstr(torch._C.eq, - """ + r""" eq(input, other, out=None) -> Tensor Computes element-wise equality @@ -1266,14 +1285,12 @@ :ref:`broadcastable ` with the first argument. Args: - input (Tensor): Tensor to compare - other (Tensor or float): Tensor or value to compare - out (Tensor, optional): Output tensor. Must be a `ByteTensor` or the same - type as `tensor`. + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + out (Tensor, optional): the output tensor. Must be a `ByteTensor` or the same type as `input`. Returns: - Tensor: a ``torch.ByteTensor`` containing a 1 at each location where the - tensors are equal and a 0 at every other location + Tensor: A ``torch.ByteTensor`` containing a 1 at each location where the tensors are equal and a 0 at every other location Example:: @@ -1284,7 +1301,7 @@ """) add_docstr(torch._C.equal, - """ + r""" equal(tensor1, tensor2) -> bool ``True`` if two tensors have the same size and elements, ``False`` otherwise. 
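A minimal sketch of the exact-equality check, on two arbitrarily chosen tensors with identical contents::

    >>> torch.equal(torch.Tensor([1, 2]), torch.Tensor([1, 2]))
    True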
@@ -1296,7 +1313,7 @@ """) add_docstr(torch._C.erf, - """ + r""" erf(tensor, out=None) -> Tensor Computes the error function of each element. @@ -1308,7 +1325,7 @@ """) add_docstr(torch._C.erfinv, - """ + r""" erfinv(tensor, out=None) -> Tensor Computes the inverse error function of each element. @@ -1320,7 +1337,7 @@ """) add_docstr(torch._C.exp, - """ + r""" exp(tensor, out=None) -> Tensor Computes the exponential of each element. @@ -1332,18 +1349,18 @@ """) add_docstr(torch._C.eye, - """ + r""" eye(n, m=None, out=None) Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. Args: - n (int): Number of rows - m (int, optional): Number of columns. If None, defaults to `n` - out (Tensor, optional): Output tensor + n (int): the number of rows + m (int, optional): the number of columns with default being :attr:`n` + out (Tensor, optional): the output tensor Returns: - Tensor: a 2-D tensor with ones on the diagonal and zeros elsewhere + Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere Example:: @@ -1355,15 +1372,15 @@ """) add_docstr(torch._C.floor, - """ + r""" floor(input, out=None) -> Tensor -Returns a new `Tensor` with the floor of the elements of :attr:`input`, +Returns a new tensor with the floor of the elements of :attr:`input`, the largest integer less than or equal to each element. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -1388,22 +1405,21 @@ """) add_docstr(torch._C.fmod, - """ + r""" fmod(input, divisor, out=None) -> Tensor Computes the element-wise remainder of division. The dividend and divisor may contain both for integer and floating point -numbers. The remainder has the same sign as the dividend `tensor`. +numbers. The remainder has the same sign as the dividend :attr:`input`. -When :attr:`divisor` is a Tensor, the shapes of :attr:`input` and +When :attr:`divisor` is a tensor, the shapes of :attr:`input` and :attr:`divisor` must be :ref:`broadcastable `. Args: - input (Tensor): The dividend - divisor (Tensor or float): The divisor. This may be either a number or a - tensor of the same shape as the dividend. - out (Tensor, optional): Output tensor + input (Tensor): the dividend + divisor (Tensor or float): the divisor, which may be either a number or a tensor of the same shape as the dividend + out (Tensor, optional): the output tensor Example:: @@ -1419,10 +1435,10 @@ """) add_docstr(torch._C.frac, - """ + r""" frac(tensor, out=None) -> Tensor -Computes the fractional portion of each element in `tensor`. +Computes the fractional portion of each element in :attr:`tensor`. Example:: @@ -1431,7 +1447,7 @@ """) add_docstr(torch._C.from_numpy, - """ + r""" from_numpy(ndarray) -> Tensor Creates a :class:`Tensor` from a :class:`numpy.ndarray`. @@ -1452,7 +1468,7 @@ """) add_docstr(torch._C.gather, - """ + r""" gather(input, dim, index, out=None) -> Tensor Gathers values along an axis specified by `dim`. @@ -1470,10 +1486,10 @@ :attr:`out` will have the same size as :attr:`index`. 
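A short sketch of the gathering rule for ``dim=1``, with ``t`` and the index tensor picked arbitrarily::

    >>> t = torch.Tensor([[1, 2], [3, 4]])
    >>> torch.gather(t, 1, torch.LongTensor([[0, 0], [1, 0]]))  # rows [1, 1] and [4, 3]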
Args: - input (Tensor): The source tensor - dim (int): The axis along which to index - index (LongTensor): The indices of elements to gather - out (Tensor, optional): Destination tensor + input (Tensor): the source tensor + dim (int): the axis along which to index + index (LongTensor): the indices of elements to gather + out (Tensor, optional): the destination tensor Example:: @@ -1485,23 +1501,21 @@ """) add_docstr(torch._C.ge, - """ + r""" ge(input, other, out=None) -> Tensor -Computes `tensor >= other` element-wise. +Computes `input >= other` element-wise. The second argument can be a number or a tensor whose shape is :ref:`broadcastable ` with the first argument. Args: - input (Tensor): Tensor to compare - other (Tensor or float): Tensor or value to compare - out (Tensor, optional): Output tensor. Must be a `ByteTensor` or the same - type as `tensor`. + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as :attr:`input` Returns: - Tensor: a ``torch.ByteTensor`` containing a 1 at each location where - comparison is true. + Tensor: A ``torch.ByteTensor`` containing a 1 at each location where comparison is true Example:: @@ -1540,12 +1554,12 @@ column. Args: - B (Tensor): The matrix :math:`B` - A (Tensor): The :math:`m` by :math:`n` matrix :math:`A` - out (tuple, optional): Optional destination tensor + B (Tensor): the matrix :math:`B` + A (Tensor): the :math:`m` by :math:`n` matrix :math:`A` + out (tuple, optional): the optional destination tensor Returns: - (Tensor, Tensor): tuple containing: + (Tensor, Tensor): A tuple containing: - **X** (*Tensor*): the least squares solution - **qr** (*Tensor*): the details of the QR factorization @@ -1595,7 +1609,7 @@ Args: input (Tensor): the input matrix - out (tuple, optional): The result tuple of (Tensor, Tensor) + out (tuple, optional): the output tuple of (Tensor, Tensor) .. _LAPACK documentation: https://software.intel.com/en-us/node/521004 @@ -1603,18 +1617,18 @@ """) add_docstr(torch._C.ger, - """ + r""" ger(vec1, vec2, out=None) -> Tensor Outer product of :attr:`vec1` and :attr:`vec2`. If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector of -size `m`, then :attr:`out` must be a matrix of size `n x m`. +size `m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`. .. note:: This function does not :ref:`broadcast `. Args: - vec1 (Tensor): 1D input vector - vec2 (Tensor): 1D input vector + vec1 (Tensor): 1-D input vector + vec2 (Tensor): 1-D input vector out (Tensor, optional): optional output matrix Example:: @@ -1632,7 +1646,7 @@ """) add_docstr(torch._C.gesv, - """ + r""" gesv(B, A, out=None) -> (Tensor, Tensor) `X, LU = torch.gesv(B, A)` returns the solution to the system of linear @@ -1640,10 +1654,10 @@ `LU` contains `L` and `U` factors for LU factorization of `A`. -:attr:`A` has to be a square and non-singular matrix (2D Tensor). +:attr:`A` has to be a square and non-singular matrix (2-D tensor). -If `A` is an `m x m` matrix and `B` is `m x k`, -the result `LU` is `m x m` and `X` is `m x k` . +If `A` is an :math:`(m \times m)` matrix and `B` is :math:`(m \times k)`, +the result `LU` is :math:`(m \times m)` and `X` is :math:`(m \times k)`. .. note:: @@ -1652,8 +1666,8 @@ instead of `(m, 1)`. 
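A minimal sketch of solving ``A X = B``, using a small, arbitrarily chosen non-singular system::

    >>> A = torch.Tensor([[2.0, 0.0], [0.0, 4.0]])
    >>> B = torch.Tensor([[2.0], [8.0]])
    >>> X, LU = torch.gesv(B, A)   # X holds [[1.0], [2.0]] since A X = B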
Args: - B (Tensor): input matrix of `m x k` dimensions - A (Tensor): input square matrix of `m x m` dimensions + B (Tensor): input matrix of :math:`(m \times k)` dimensions + A (Tensor): input square matrix of :math:`(m \times m)` dimensions out (Tensor, optional): optional output matrix Example:: @@ -1673,30 +1687,28 @@ """) add_docstr(torch._C.get_num_threads, - """ + r""" get_num_threads() -> int Gets the number of OpenMP threads used for parallelizing CPU operations """) add_docstr(torch._C.gt, - """ + r""" gt(input, other, out=None) -> Tensor -Computes `tensor > other` element-wise. +Computes `input > other` element-wise. The second argument can be a number or a tensor whose shape is :ref:`broadcastable ` with the first argument. Args: - input (Tensor): Tensor to compare - other (Tensor or float): Tensor or value to compare - out (Tensor, optional): Output tensor. Must be a `ByteTensor` or the same - type as `tensor`. + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as :attr:`input` Returns: - Tensor: a ``torch.ByteTensor`` containing a 1 at each location where - comparison is true. + Tensor: A ``torch.ByteTensor`` containing a 1 at each location where comparison is true Example:: @@ -1707,23 +1719,24 @@ """) add_docstr(torch._C.histc, - """ + r""" histc(input, bins=100, min=0, max=0, out=None) -> Tensor Computes the histogram of a tensor. -The elements are sorted into equal width bins between `min` and `max`. If `min` -and `max` are both zero, the minimum and maximum values of the data are used. +The elements are sorted into equal width bins between :attr:`min` and +:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and +maximum values of the data are used. Args: - input (Tensor): Input data - bins (int): Number of histogram bins - min (int): Lower end of the range (inclusive) - max (int): Upper end of the range (inclusive) - out (Tensor, optional): Output argument + input (Tensor): the input tensor + bins (int): number of histogram bins + min (int): lower end of the range (inclusive) + max (int): upper end of the range (inclusive) + out (Tensor, optional): the output tensor Returns: - Tensor: the histogram + Tensor: Histogram represented as a tensor Example:: @@ -1733,23 +1746,23 @@ """) add_docstr(torch._C.index_select, - """ + r""" index_select(input, dim, index, out=None) -> Tensor -Returns a new `Tensor` which indexes the :attr:`input` `Tensor` along dimension +Returns a new tensor which indexes the :attr:`input` tensor along dimension :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`. -The returned `Tensor` has the same number of dimensions as -the original `Tensor`. +The returned tensor has the same number of dimensions as +the original tensor. -.. note:: The returned `Tensor` does **not** use the same storage as - the original `Tensor` +.. 
note:: The returned tensor does **not** use the same storage as + the original tensor Args: - input (Tensor): Input data + input (Tensor): the input tensor dim (int): the dimension in which we index - index (LongTensor): the 1D tensor containing the indices to index - out (Tensor, optional): Output argument + index (LongTensor): the 1-D tensor containing the indices to index + out (Tensor, optional): the output tensor Example:: @@ -1778,7 +1791,7 @@ """) add_docstr(torch._C.inverse, - """ + r""" inverse(input, out=None) -> Tensor Takes the inverse of the square matrix :attr:`input`. @@ -1789,8 +1802,8 @@ transposed, i.e. with strides `(1, m)` instead of `(m, 1)` Args: - input (Tensor): the input 2D square `Tensor` - out (Tensor, optional): the optional output `Tensor` + input (Tensor): the input 2-D square tensor + out (Tensor, optional): the optional output tensor Example:: @@ -1832,29 +1845,29 @@ """) add_docstr(torch._C.kthvalue, - """ + r""" kthvalue(input, k, dim=None, keepdim=False, out=None) -> (Tensor, LongTensor) -Returns the :attr:`k` th smallest element of the given :attr:`input` Tensor +Returns the :attr:`k` th smallest element of the given :attr:`input` tensor along a given dimension. If :attr:`dim` is not given, the last dimension of the `input` is chosen. A tuple of `(values, indices)` is returned, where the `indices` is the indices -of the kth-smallest element in the original `input` Tensor in dimension `dim`. +of the kth-smallest element in the original `input` tensor in dimension `dim`. -If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` Tensors +If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors are the same size as :attr:`input`, except in the dimension :attr:`dim` where they are of size 1. Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in both the :attr:`values` and -:attr:`indices` Tensors having 1 fewer dimension than the :attr:`input` Tensor. +:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor k (int): k for the k-th smallest element - dim (int, optional): The dimension to find the kth value along - keepdim (bool): whether the output Tensors have :attr:`dim` retained or not - out (tuple, optional): The output tuple of (Tensor, LongTensor) + dim (int, optional): the dimension to find the kth value along + keepdim (bool): whether the output tensors have :attr:`dim` retained or not + out (tuple, optional): the output tuple of (Tensor, LongTensor) can be optionally given to be used as output buffers Example:: @@ -1896,23 +1909,21 @@ """) add_docstr(torch._C.le, - """ + r""" le(input, other, out=None) -> Tensor -Computes `tensor <= other` element-wise. +Computes `input <= other` element-wise. The second argument can be a number or a tensor whose shape is :ref:`broadcastable ` with the first argument. Args: - input (Tensor): Tensor to compare - other (Tensor or float): Tensor or value to compare - out (Tensor, optional): Output tensor. Must be a `ByteTensor` or the same - type as `tensor`. + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as :attr:`input` Returns: - Tensor: a ``torch.ByteTensor`` containing a 1 at each location where - comparison is true. 
+ Tensor: A ``torch.ByteTensor`` containing a 1 at each location where comparison is true Example:: @@ -1923,22 +1934,23 @@ """) add_docstr(torch._C.lerp, - """ + r""" lerp(start, end, weight, out=None) Does a linear interpolation of two tensors :attr:`start` and :attr:`end` based -on a scalar :attr:`weight`: and returns the resulting :attr:`out` Tensor. +on a scalar :attr:`weight` and returns the resulting :attr:`out` tensor. -:math:`out_i = start_i + weight * (end_i - start_i)` +.. math:: + out_i = start_i + weight \times (end_i - start_i) The shapes of :attr:`start` and :attr:`end` must be :ref:`broadcastable `. Args: - start (Tensor): the `Tensor` with the starting points - end (Tensor): the `Tensor` with the ending points + start (Tensor): the tensor with the starting points + end (Tensor): the tensor with the ending points weight (float): the weight for the interpolation formula - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -1971,20 +1983,20 @@ """) add_docstr(torch._C.linspace, - """ + r""" linspace(start, end, steps=100, out=None) -> Tensor -Returns a one-dimensional Tensor of :attr:`steps` +Returns a one-dimensional tensor of :attr:`steps` equally spaced points between :attr:`start` and :attr:`end` -The output tensor is 1D of size :attr:`steps` +The output tensor is 1-D of size :attr:`steps` Args: - start (float): The starting value for the set of points - end (float): The ending value for the set of points - steps (int): Number of points to sample between :attr:`start` + start (float): the starting value for the set of points + end (float): the ending value for the set of points + steps (int): number of points to sample between :attr:`start` and :attr:`end` - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -2018,15 +2030,15 @@ """) add_docstr(torch._C.log, - """ + r""" log(input, out=None) -> Tensor -Returns a new `Tensor` with the natural logarithm of the elements +Returns a new tensor with the natural logarithm of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -2052,19 +2064,20 @@ """) add_docstr(torch._C.log1p, - """ + r""" log1p(input, out=None) -> Tensor -Returns a new `Tensor` with the natural logarithm of (1 + :attr:`input`). +Returns a new tensor with the natural logarithm of (1 + :attr:`input`). -:math:`y_i = log(x_i + 1)` +.. math:: + y_i = \log (x_i + 1) .. note:: This function is more accurate than :func:`torch.log` for small values of :attr:`input` Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -2090,20 +2103,20 @@ """) add_docstr(torch._C.logspace, - """ + r""" logspace(start, end, steps=100, out=None) -> Tensor -Returns a one-dimensional Tensor of :attr:`steps` points -logarithmically spaced between :math:`10^{start}` and :math:`10^{end}` +Returns a one-dimensional tensor of :attr:`steps` points +logarithmically spaced between :math:`10^{start}` and :math:`10^{end}`. 
-The output is a 1D tensor of size :attr:`steps` +The output is a 1-D tensor of size :attr:`steps` Args: - start (float): The starting value for the set of points - end (float): The ending value for the set of points - steps (int): Number of points to sample between + start (float): the starting value for the set of points + end (float): the ending value for the set of points + steps (int): number of points to sample between :attr:`start` and :attr:`end` - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -2128,23 +2141,21 @@ """) add_docstr(torch._C.lt, - """ + r""" lt(input, other, out=None) -> Tensor -Computes `tensor < other` element-wise. +Computes `input < other` element-wise. The second argument can be a number or a tensor whose shape is :ref:`broadcastable ` with the first argument. Args: - input (Tensor): Tensor to compare - other (Tensor or float): Tensor or value to compare - out (Tensor, optional): Output tensor. Must be a `ByteTensor` or - the same type as `tensor`. + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as :attr:`input` Returns: - Tensor: a ``torch.ByteTensor`` containing a 1 at each location where - comparison is true. + Tensor: A `torch.ByteTensor` containing a 1 at each location where comparison is true Example:: @@ -2155,22 +2166,22 @@ """) add_docstr(torch._C.masked_select, - """ + r""" masked_select(input, mask, out=None) -> Tensor -Returns a new 1D `Tensor` which indexes the :attr:`input` `Tensor` according to +Returns a new 1-D tensor which indexes the :attr:`input` tensor according to the binary mask :attr:`mask` which is a `ByteTensor`. The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need to match, but they must be :ref:`broadcastable `. -.. note:: The returned `Tensor` does **not** use the same storage - as the original `Tensor` +.. note:: The returned tensor does **not** use the same storage + as the original tensor Args: - input (Tensor): Input data + input (Tensor): the input data mask (ByteTensor): the tensor containing the binary mask to index with - out (Tensor, optional): Output argument + out (Tensor, optional): the output tensor Example:: @@ -2204,13 +2215,13 @@ """) add_docstr(torch._C.max, - """ + r""" .. function:: max(input) -> float -Returns the maximum value of all elements in the :attr:`input` Tensor. +Returns the maximum value of all elements in the :attr:`input` tensor. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor Example:: @@ -2226,20 +2237,20 @@ .. function:: max(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor) -Returns the maximum value of each row of the :attr:`input` Tensor in the given +Returns the maximum value of each row of the :attr:`input` tensor in the given dimension :attr:`dim`. The second return value is the index location of each maximum value found (argmax). -If :attr:`keepdim` is ``True``, the output Tensors are of the same size +If :attr:`keepdim` is ``True``, the output tensors are of the same size as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting -in the output Tensors having 1 fewer dimension than :attr:`input`. +in the output tensors having 1 fewer dimension than :attr:`input`. 
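A brief sketch of the row-wise reduction over ``dim=1``, with ``a`` chosen arbitrarily::

    >>> a = torch.Tensor([[1, 3], [4, 2]])
    >>> values, indices = torch.max(a, 1)  # per-row maxima 3, 4 at column indices 1, 0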
Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int): the dimension to reduce - keepdim (bool): whether the output Tensors have :attr:`dim` retained or not - out (tuple, optional): the result tuple of two output Tensors (max, max_indices) + keepdim (bool): whether the output tensors have :attr:`dim` retained or not + out (tuple, optional): the result tuple of two output tensors (max, max_indices) Example:: @@ -2269,21 +2280,22 @@ .. function:: max(input, other, out=None) -> Tensor -Each element of the Tensor :attr:`input` is compared with the corresponding -element of the Tensor :attr:`other` and an element-wise `max` is taken. +Each element of the tensor :attr:`input` is compared with the corresponding +element of the tensor :attr:`other` and an element-wise `max` is taken. The shapes of :attr:`input` and :attr:`other` don't need to match, but they must be :ref:`broadcastable `. +.. math:: + out_i = \max(input_i, other_i) + .. note:: When the shapes do not match, the shape of the returned output tensor follows the :ref:`broadcasting rules `. -:math:`out_i = max(tensor_i, other_i)` - Args: - input (Tensor): the input `Tensor` - other (Tensor): the second input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + other (Tensor): the second input tensor + out (Tensor, optional): the output tensor Example:: @@ -2316,13 +2328,13 @@ """) add_docstr(torch._C.mean, - """ + r""" .. function:: mean(input) -> float -Returns the mean value of all elements in the :attr:`input` Tensor. +Returns the mean value of all elements in the :attr:`input` tensor. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor Example:: @@ -2338,20 +2350,19 @@ .. function:: mean(input, dim, keepdim=False, out=None) -> Tensor -Returns the mean value of each row of the :attr:`input` Tensor in the given +Returns the mean value of each row of the :attr:`input` tensor in the given dimension :attr:`dim`. -If :attr:`keepdim` is ``True``, the output Tensor is of the same size +If :attr:`keepdim` is ``True``, the output tensor is of the same size as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the -output Tensor having 1 fewer dimension. +output tensor having 1 fewer dimension. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int): the dimension to reduce - keepdim (bool, optional): whether the output tensor has :attr:`dim` - retained or not - out (Tensor): the result Tensor + keepdim (bool, optional): whether the output tensor has :attr:`dim` retained or not + out (Tensor): the output tensor Example:: @@ -2383,13 +2394,13 @@ """) add_docstr(torch._C.median, - """ + r""" .. function:: median(input) -> float -Returns the median value of all elements in the :attr:`input` Tensor. +Returns the median value of all elements in the :attr:`input` tensor. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor Example:: @@ -2405,23 +2416,23 @@ .. function:: median(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor) -Returns the median value of each row of the :attr:`input` Tensor in the given +Returns the median value of each row of the :attr:`input` tensor in the given dimension :attr:`dim`. Also returns the index location of the median value as a `LongTensor`. -By default, :attr:`dim` is the last dimension of the :attr:`input` Tensor.
+By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. -If :attr:`keepdim` is ``True``, the output Tensors are of the same size +If :attr:`keepdim` is ``True``, the output tensors are of the same size as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in -the outputs Tensor having 1 fewer dimension than :attr:`input`. +the output tensors having 1 fewer dimension than :attr:`input`. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int): the dimension to reduce - keepdim (bool): whether the output Tensors have :attr:`dim` retained or not - values (Tensor, optional): the result Tensor - indices (Tensor, optional): the result index Tensor + keepdim (bool): whether the output tensors have :attr:`dim` retained or not + values (Tensor, optional): the output tensor + indices (Tensor, optional): the output index tensor Example:: @@ -2460,13 +2471,13 @@ """) add_docstr(torch._C.min, - """ + r""" .. function:: min(input) -> float -Returns the minimum value of all elements in the :attr:`input` Tensor. +Returns the minimum value of all elements in the :attr:`input` tensor. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor Example:: @@ -2482,20 +2493,20 @@ .. function:: min(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor) -Returns the minimum value of each row of the :attr:`input` Tensor in the given +Returns the minimum value of each row of the :attr:`input` tensor in the given dimension :attr:`dim`. The second return value is the index location of each minimum value found (argmin). -If :attr:`keepdim` is ``True``, the output Tensors are of the same size as +If :attr:`keepdim` is ``True``, the output tensors are of the same size as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in -the output Tensors having 1 fewer dimension than :attr:`input`. +the output tensors having 1 fewer dimension than :attr:`input`. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int): the dimension to reduce keepdim (bool): whether the output tensors have :attr:`dim` retained or not - out (tuple, optional): the result tuple of two output Tensors (min, min_indices) + out (tuple, optional): the tuple of two output tensors (min, min_indices) Example:: @@ -2524,22 +2535,23 @@ .. function:: min(input, other, out=None) -> Tensor -Each element of the Tensor :attr:`input` is compared with the corresponding -element of the Tensor :attr:`other` and an element-wise `min` is taken. -The resulting Tensor is returned. +Each element of the tensor :attr:`input` is compared with the corresponding +element of the tensor :attr:`other` and an element-wise `min` is taken. +The resulting tensor is returned. The shapes of :attr:`input` and :attr:`other` don't need to match, but they must be :ref:`broadcastable `. +.. math:: + out_i = \min(input_i, other_i) + .. note:: When the shapes do not match, the shape of the returned output tensor follows the :ref:`broadcasting rules `.
-:math:`out_i = min(tensor_i, other_i)` - Args: - input (Tensor): the input `Tensor` - other (Tensor): the second input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + other (Tensor): the second input tensor + out (Tensor, optional): the output tensor Example:: @@ -2572,21 +2584,21 @@ """) add_docstr(torch._C.mm, - """ + r""" mm(mat1, mat2, out=None) -> Tensor Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. -If :attr:`mat1` is a `n x m` Tensor, :attr:`mat2` is a `m x p` Tensor, -:attr:`out` will be a `n x p` Tensor. +If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a +:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor. .. note:: This function does not :ref:`broadcast `. For broadcasting matrix products, see :func:`torch.matmul`. Args: - mat1 (Tensor): First matrix to be multiplied - mat2 (Tensor): Second matrix to be multiplied - out (Tensor, optional): Output tensor + mat1 (Tensor): the first matrix to be multiplied + mat2 (Tensor): the second matrix to be multiplied + out (Tensor, optional): the output tensor Example:: @@ -2599,28 +2611,28 @@ """) add_docstr(torch._C.mode, - """ + r""" mode(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor) -Returns the mode value of each row of the :attr:`input` Tensor in the given +Returns the mode value of each row of the :attr:`input` tensor in the given dimension :attr:`dim`. Also returns the index location of the mode value as a `LongTensor`. -By default, :attr:`dim` is the last dimension of the :attr:`input` Tensor. +By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. -If :attr:`keepdim` is ``True``, the output Tensors are of the same size as +If :attr:`keepdim` is ``True``, the output tensors are of the same size as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting -in the output Tensors having 1 fewer dimension than :attr:`input`. +in the output tensors having 1 fewer dimension than :attr:`input`. .. note:: This function is not defined for ``torch.cuda.Tensor`` yet. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int): the dimension to reduce keepdim (bool): whether the output tensors have :attr:`dim` retained or not - values (Tensor, optional): the result Tensor - indices (Tensor, optional): the result index Tensor + values (Tensor, optional): the output tensor + indices (Tensor, optional): the output index tensor Example:: @@ -2659,21 +2671,22 @@ """) add_docstr(torch._C.mul, - """ + r""" .. function:: mul(input, value, out=None) Multiplies each element of the input :attr:`input` with the scalar :attr:`value` and returns a new resulting tensor. -:math:`out = tensor * value` +.. math:: + out_i = value \times input_i If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`value` should be a real number, otherwise it should be an integer Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor value (Number): the number to be multiplied to each element of :attr:`input` - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -2695,18 +2708,19 @@ .. function:: mul(input, other, out=None) -Each element of the Tensor :attr:`input` is multiplied by each element of the -Tensor :attr:`other`. The resulting Tensor is returned. 
+Each element of the tensor :attr:`input` is multiplied by each element of the +Tensor :attr:`other`. The resulting tensor is returned. The shapes of :attr:`input` and :attr:`other` must be :ref:`broadcastable `. -:math:`out_i = input_i * other_i` +.. math:: + out_i = input_i \times other_i Args: - input (Tensor): the first multiplicand `Tensor` - other (Tensor): the second multiplicand `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the first multiplicand tensor + other (Tensor): the second multiplicand tensor + out (Tensor, optional): the output tensor Example:: @@ -2740,9 +2754,9 @@ u""" multinomial(input, num_samples, replacement=False, out=None) -> LongTensor -Returns a Tensor where each row +Returns a tensor where each row contains :attr:`num_samples` indices sampled from the multinomial probability -distribution located in the corresponding row of Tensor :attr:`input`. +distribution located in the corresponding row of tensor :attr:`input`. .. note:: The rows of :attr:`input` do not need to sum to one (in which case we use @@ -2765,14 +2779,14 @@ :attr:`input` length (or number of columns of :attr:`input` if it is a matrix). Args: - input (Tensor): Tensor containing probabilities + input (Tensor): the input tensor containing probabilities num_samples (int): number of samples to draw - replacement (bool, optional): Whether to draw with replacement or not - out (Tensor, optional): The result `Tensor` + replacement (bool, optional): whether to draw with replacement or not + out (Tensor, optional): the output tensor Example:: - >>> weights = torch.Tensor([0, 10, 3, 0]) # create a Tensor of weights + >>> weights = torch.Tensor([0, 10, 3, 0]) # create a tensor of weights >>> torch.multinomial(weights, 4) 1 @@ -2792,21 +2806,21 @@ """) add_docstr(torch._C.mv, - """ + r""" mv(mat, vec, out=None) -> Tensor Performs a matrix-vector product of the matrix :attr:`mat` and the vector :attr:`vec`. -If :attr:`mat` is a `n x m` Tensor, :attr:`vec` is a 1D Tensor of size `m`, -:attr:`out` will be 1D of size `n`. +If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of +size `m`, :attr:`out` will be 1-D of size `n`. .. note:: This function does not :ref:`broadcast `. Args: mat (Tensor): matrix to be multiplied vec (Tensor): vector to be multiplied - out (Tensor, optional): Output tensor + out (Tensor, optional): the output tensor Example:: @@ -2819,23 +2833,21 @@ """) add_docstr(torch._C.ne, - """ + r""" ne(input, other, out=None) -> Tensor -Computes `tensor != other` element-wise. +Computes `input != other` element-wise. The second argument can be a number or a tensor whose shape is :ref:`broadcastable ` with the first argument. Args: - input (Tensor): Tensor to compare - other (Tensor or float): Tensor or value to compare - out (Tensor, optional): Output tensor. Must be a `ByteTensor` or the same - type as `tensor`. + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as `input` Returns: - Tensor: a ``torch.ByteTensor`` containing a 1 at each location where - comparison is true. + Tensor: A ``torch.ByteTensor`` containing a 1 at each location where comparison is true. Example:: @@ -2846,16 +2858,17 @@ """) add_docstr(torch._C.neg, - """ + r""" neg(input, out=None) -> Tensor -Returns a new `Tensor` with the negative of the elements of :attr:`input`. +Returns a new tensor with the negative of the elements of :attr:`input`. 
-:math:`out = -1 * input` +.. math:: + out = -1 \times input Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -2881,20 +2894,20 @@ """) add_docstr(torch._C.nonzero, - """ + r""" nonzero(input, out=None) -> LongTensor Returns a tensor containing the indices of all non-zero elements of :attr:`input`. Each row in the result contains the indices of a non-zero element in :attr:`input`. -If :attr:`input` has `n` dimensions, then the resulting indices Tensor -:attr:`out` is of size `z x n`, where `z` is the total number of non-zero -elements in the :attr:`input` Tensor. +If :attr:`input` has `n` dimensions, then the resulting indices tensor +:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of +non-zero elements in the :attr:`input` tensor. Args: - input (Tensor): the input `Tensor` - out (LongTensor, optional): The result `Tensor` containing indices + input (Tensor): the input tensor + out (LongTensor, optional): the output tensor containing indices Example:: @@ -2920,13 +2933,13 @@ """) add_docstr(torch._C.norm, - """ + r""" .. function:: norm(input, p=2) -> float -Returns the p-norm of the :attr:`input` Tensor. +Returns the p-norm of the :attr:`input` tensor. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor p (float, optional): the exponent value in the norm formulation Example:: @@ -2942,20 +2955,20 @@ .. function:: norm(input, p, dim, keepdim=False, out=None) -> Tensor -Returns the p-norm of each row of the :attr:`input` Tensor in the given +Returns the p-norm of each row of the :attr:`input` tensor in the given dimension :attr:`dim`. -If :attr:`keepdim` is ``True``, the output Tensor is of the same size as +If :attr:`keepdim` is ``True``, the output tensor is of the same size as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting -in the output Tensor having 1 fewer dimension than :attr:`input`. +in the output tensor having 1 fewer dimension than :attr:`input`. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor p (float): the exponent value in the norm formulation dim (int): the dimension to reduce - keepdim (bool): whether the output Tensor has :attr:`dim` retained or not - out (Tensor, optional): the result Tensor + keepdim (bool): whether the output tensor has :attr:`dim` retained or not + out (Tensor, optional): the output tensor Example:: @@ -2987,28 +3000,28 @@ """) add_docstr(torch._C.normal, - """ + r""" .. function:: normal(means, std, out=None) -Returns a Tensor of random numbers drawn from separate normal distributions +Returns a tensor of random numbers drawn from separate normal distributions who's mean and standard deviation are given. -The :attr:`means` is a Tensor with the mean of +The :attr:`means` is a tensor with the mean of each output element's normal distribution -The :attr:`std` is a Tensor with the standard deviation of +The :attr:`std` is a tensor with the standard deviation of each output element's normal distribution The shapes of :attr:`means` and :attr:`std` don't need to match. -The total number of elements in each Tensor need to be the same. +The total number of elements in each tensor needs to be the same. ..
note:: When the shapes do not match, the shape of :attr:`means` - is used as the shape for the returned output Tensor + is used as the shape for the returned output tensor Args: - means (Tensor): the Tensor of per-element means - std (Tensor): the Tensor of per-element standard deviations - out (Tensor): the optional result Tensor + means (Tensor): the tensor of per-element means + std (Tensor): the tensor of per-element standard deviations + out (Tensor, optional): the output tensor Example:: @@ -3033,8 +3046,8 @@ Args: means (float, optional): the mean for all distributions - std (Tensor): the Tensor of per-element standard deviations - out (Tensor): the optional result Tensor + std (Tensor): the tensor of per-element standard deviations + out (Tensor, optional): the output tensor Example:: @@ -3053,9 +3066,9 @@ all drawn elements. Args: - means (Tensor): the Tensor of per-element means + means (Tensor): the tensor of per-element means std (float, optional): the standard deviation for all distributions - out (Tensor): the optional result Tensor + out (Tensor, optional): the output tensor Example:: @@ -3071,13 +3084,13 @@ """) add_docstr(torch._C.numel, - """ + r""" numel(input) -> int -Returns the total number of elements in the :attr:`input` Tensor. +Returns the total number of elements in the :attr:`input` tensor. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor Example:: @@ -3091,15 +3104,15 @@ """) add_docstr(torch._C.ones, - """ + r""" ones(*sizes, out=None) -> Tensor -Returns a Tensor filled with the scalar value `1`, with the shape defined +Returns a tensor filled with the scalar value `1`, with the shape defined by the varargs :attr:`sizes`. Args: - sizes (int...): a set of ints defining the shape of the output Tensor. - out (Tensor, optional): the result Tensor + sizes (int...): a set of integers defining the shape of the output tensor + out (Tensor, optional): the output tensor Example:: @@ -3121,14 +3134,15 @@ """) add_docstr(torch._C.ones_like, - """ + r""" ones_like(input, out=None) -> Tensor -Returns a Tensor filled with the scalar value `1`, with the same size as :attr:`input`. +Returns a tensor filled with the scalar value `1`, with the same size as +:attr:`input`. Args: - input (Tensor): The size of the input will determine the size of the output. - out (Tensor, optional): the result Tensor + input (Tensor): the size of :attr:`input` will determine the size of the output tensor + out (Tensor, optional): the output tensor Example:: @@ -3141,18 +3155,18 @@ """) add_docstr(torch._C.orgqr, - """ + r""" orgqr(a, tau) -> Tensor -Computes the orthogal matrix `Q` of a QR factorization, from the `(a, tau)` tuple -returned by :func:`torch.geqrf`. +Computes the orthogonal matrix `Q` of a QR factorization, from the `(a, tau)` +tuple returned by :func:`torch.geqrf`. This directly calls the underlying LAPACK function `?orgqr`. See `?orgqr LAPACK documentation`_ for further details. Args: - a (Tensor): The `a` from :func:`torch.geqrf`. - tau (Tensor): The `tau` from `torch.geqrf`. + a (Tensor): the `a` from :func:`torch.geqrf`. + tau (Tensor): the `tau` from :func:`torch.geqrf`. ..
_?orgqr LAPACK documentation: https://software.intel.com/en-us/mkl-developer-reference-c-orgqr @@ -3160,7 +3174,7 @@ """) add_docstr(torch._C.ormqr, - """ + r""" ormqr(a, tau, mat, left=True, transpose=False) -> (Tensor, Tensor) Multiplies `mat` by the orthogonal `Q` matrix of the QR factorization @@ -3175,7 +3189,7 @@ """) add_docstr(torch._C.potrf, - """ + r""" potrf(a, out=None) potrf(a, upper, out=None) @@ -3187,9 +3201,9 @@ such that :math:`a = u u^T`. Args: - a (Tensor): the input 2D `Tensor`, a symmetric positive semidefinite matrix - upper (bool, optional): Return upper (default) or lower triangular matrix - out (Tensor, optional): A Tensor for u + a (Tensor): the input 2-D tensor, a symmetric positive semidefinite matrix + upper (bool, optional): whether to return an upper (default) or lower triangular matrix + out (Tensor, optional): the output tensor for `u` Example:: @@ -3220,7 +3234,7 @@ """) add_docstr(torch._C.potri, - """ + r""" potri(u, out=None) potri(u, upper, out=None) @@ -3232,10 +3246,10 @@ such that :math:`inv = (u u^T)^{-1}`. Args: - u (Tensor): the input 2D `Tensor`, a upper or lower triangular Cholesky factor - upper (bool, optional): Flag if upper (default) or lower triangular matrix - out (Tensor, optional): A Tensor for inv + u (Tensor): the input 2-D tensor, an upper or lower triangular Cholesky factor + upper (bool, optional): whether to return an upper (default) or lower triangular matrix + out (Tensor, optional): the output tensor for `inv` Example:: @@ -3266,7 +3280,7 @@ """) add_docstr(torch._C.potrs, - """ + r""" potrs(b, u, out=None) potrs(b, u, upper, out=None) @@ -3278,14 +3292,13 @@ If `upper` is ``False``, `u` is and lower triangular such that :math:`c = (u u^T)^{-1} b`. -.. note:: `b` is always a 2D `Tensor`, use `b.unsqueeze(1)` to convert a vector. +.. note:: `b` is always a 2-D tensor, use `b.unsqueeze(1)` to convert a vector. Args: - b (Tensor): the right hand side 2D `Tensor` - u (Tensor): the input 2D `Tensor`, a upper or lower triangular Cholesky factor - upper (bool, optional): Return upper (default) or lower triangular matrix - out (Tensor, optional): A Tensor for c + b (Tensor): the right hand side 2-D tensor + u (Tensor): the input 2-D tensor, an upper or lower triangular Cholesky factor + upper (bool, optional): whether to return an upper (default) or lower triangular matrix + out (Tensor, optional): the output tensor for `c` Example:: @@ -3324,30 +3337,32 @@ """) add_docstr(torch._C.pow, - """ + r""" .. function:: pow(input, exponent, out=None) Takes the power of each element in :attr:`input` with :attr:`exponent` and -returns a Tensor with the result. +returns a tensor with the result. -:attr:`exponent` can be either a single ``float`` number or a ``Tensor`` +:attr:`exponent` can be either a single ``float`` number or a `Tensor` with the same number of elements as :attr:`input`. When :attr:`exponent` is a scalar value, the operation applied is: -:math:`out_i = x_i ^ {exponent}` +.. math:: + out_i = x_i ^ {exponent} -When :attr:`exponent` is a Tensor, the operation applied is: +When :attr:`exponent` is a tensor, the operation applied is: -:math:`out_i = x_i ^ {exponent_i}` +.. math:: + out_i = x_i ^ {exponent_i} -When :attr:`exponent` is a Tensor, the shapes of :attr:`input` +When :attr:`exponent` is a tensor, the shapes of :attr:`input` and :attr:`exponent` must be :ref:`broadcastable `.
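A short sketch of the two forms described above (scalar and tensor exponents), with ``a`` chosen arbitrarily::

    >>> a = torch.Tensor([1.0, 2.0, 3.0])
    >>> torch.pow(a, 2)                        # scalar exponent: 1.0, 4.0, 9.0
    >>> torch.pow(a, torch.Tensor([3, 2, 1]))  # tensor exponent: 1.0, 4.0, 3.0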
Args: - input (Tensor): the input `Tensor` - exponent (float or Tensor): the exponent value - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + exponent (float or Tensor): the exponent value + out (Tensor, optional): the output tensor Example:: @@ -3397,17 +3412,18 @@ .. function:: pow(base, input, out=None) -:attr:`base` is a scalar ``float`` value, and :attr:`input` is a Tensor. -The returned Tensor :attr:`out` is of the same shape as :attr:`input` +:attr:`base` is a scalar ``float`` value, and :attr:`input` is a tensor. +The returned tensor :attr:`out` is of the same shape as :attr:`input`. The operation applied is: -:math:`out_i = base ^ {input_i}` +.. math:: + out_i = base ^ {input_i} Args: base (float): the scalar base value for the power operation - input (Tensor): the exponent `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the exponent tensor + out (Tensor, optional): the output tensor Example:: @@ -3424,13 +3440,13 @@ """) add_docstr(torch._C.prod, - """ + r""" .. function:: prod(input) -> float -Returns the product of all elements in the :attr:`input` Tensor. +Returns the product of all elements in the :attr:`input` tensor. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor Example:: @@ -3446,19 +3462,19 @@ .. function:: prod(input, dim, keepdim=False, out=None) -> Tensor -Returns the product of each row of the :attr:`input` Tensor in the given +Returns the product of each row of the :attr:`input` tensor in the given dimension :attr:`dim`. -If :attr:`keepdim` is ``True``, the output Tensor is of the same size as +If :attr:`keepdim` is ``True``, the output tensor is of the same size as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting -in the output Tensor having 1 fewer dimension than :attr:`input`. +in the output tensor having 1 fewer dimension than :attr:`input`. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int): the dimension to reduce - keepdim (bool): whether the output Tensor has :attr:`dim` retained or not - out (Tensor, optional): the result Tensor + keepdim (bool): whether the output tensor has :attr:`dim` retained or not + out (Tensor, optional): the output tensor Example:: @@ -3482,7 +3498,7 @@ """) add_docstr(torch._C.pstrf, - """ + r""" pstrf(a, out=None) pstrf(a, upper, out=None) @@ -3494,9 +3510,9 @@ such that :math:`a = p^T u u^T p`. Args: - a (Tensor): the input 2D `Tensor` - upper (bool, optional): Return upper (default) or lower triangular matrix - out (tuple, optional): A tuple of u and piv Tensors + a (Tensor): the input 2-D tensor + upper (bool, optional): whether to return an upper (default) or lower triangular matrix + out (tuple, optional): tuple of `u` and `piv` tensors Example:: @@ -3535,11 +3551,11 @@ """) add_docstr(torch._C.qr, - """ + r""" qr(input, out=None) -> (Tensor, Tensor) Computes the QR decomposition of a matrix :attr:`input`: returns matrices -`q` and `r` such that :math:`x = q * r`, with `q` being an orthogonal matrix +`q` and `r` such that :math:`x = q r`, with `q` being an orthogonal matrix and `r` being an upper triangular matrix. This returns the thin (reduced) QR factorization. @@ -3555,8 +3571,8 @@ transposed, i.e. with strides `(1, m)` instead of `(m, 1)`.
Args: - input (Tensor): the input 2D `Tensor` - out (tuple, optional): A tuple of Q and R Tensors + input (Tensor): the input 2-D tensor + out (tuple, optional): tuple of `Q` and `R` tensors Example:: @@ -3593,17 +3609,17 @@ """) add_docstr(torch._C.rand, - """ + r""" rand(*sizes, out=None) -> Tensor -Returns a Tensor filled with random numbers from a uniform distribution +Returns a tensor filled with random numbers from a uniform distribution on the interval :math:`[0, 1)` -The shape of the Tensor is defined by the varargs :attr:`sizes`. +The shape of the tensor is defined by the varargs :attr:`sizes`. Args: - sizes (int...): a set of ints defining the shape of the output Tensor. - out (Tensor, optional): the result Tensor + sizes (int...): a set of ints defining the shape of the output tensor. + out (Tensor, optional): the output tensor Example:: @@ -3624,17 +3640,17 @@ """) add_docstr(torch._C.randn, - """ + r""" randn(*sizes, out=None) -> Tensor -Returns a Tensor filled with random numbers from a normal distribution +Returns a tensor filled with random numbers from a normal distribution with zero mean and variance of one. -The shape of the Tensor is defined by the varargs :attr:`sizes`. +The shape of the tensor is defined by the varargs :attr:`sizes`. Args: - sizes (int...): a set of ints defining the shape of the output Tensor. - out (Tensor, optional): the result Tensor + sizes (int...): a set of ints defining the shape of the output tensor. + out (Tensor, optional): the output tensor Example:: @@ -3655,7 +3671,7 @@ """) add_docstr(torch._C.randperm, - """ + r""" randperm(n, out=None) -> LongTensor Returns a random permutation of integers from ``0`` to ``n - 1``. @@ -3675,21 +3691,21 @@ """) add_docstr(torch._C.range, - """ + r""" range(start, end, step=1, out=None) -> Tensor -Returns a 1D Tensor of size :math:`floor((end - start) / step) + 1` with values -from :attr:`start` to :attr:`end` with step :attr:`step`. Step is the gap -between two values in the tensor. :math:`x_{i+1} = x_i + step` +Returns a 1-D tensor of size :math:`\lfloor \frac{end - start}{step} \rfloor + 1` +with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is +the gap between two values in the tensor. :math:`x_{i+1} = x_i + step`. Warning: This function is deprecated in favor of :func:`torch.arange`. Args: - start (float): The starting value for the set of points - end (float): The ending value for the set of points - step (float): The gap between each pair of adjacent points - out (Tensor, optional): The result `Tensor` + start (float): the starting value for the set of points + end (float): the ending value for the set of points + step (float): the gap between each pair of adjacent points + out (Tensor, optional): the output tensor Example:: @@ -3715,18 +3731,18 @@ """) add_docstr(torch._C.arange, - """ + r""" arange(start=0, end, step=1, out=None) -> Tensor -Returns a 1D Tensor of size :math:`floor((end - start) / step)` with values -from the interval ``[start, end)`` taken with step :attr:`step` starting -from `start`. +Returns a 1-D tensor of size :math:`\lfloor \frac{end - start}{step} \rfloor` +with values from the interval ``[start, end)`` taken with step :attr:`step` +starting from `start`. 
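To make the half-open interval concrete (a minimal sketch; compare with :func:`torch.range` above, which includes `end`):

    >>> torch.arange(1, 4)           # 1, 2, 3, since `end` is excluded
    >>> torch.arange(1, 2.5, 0.5)    # 1.0, 1.5, 2.0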
Args: - start (float): The starting value for the set of points - end (float): The ending value for the set of points - step (float): The gap between each pair of adjacent points - out (Tensor, optional): The result `Tensor` + start (float): the starting value for the set of points + end (float): the ending value for the set of points + step (float): the gap between each pair of adjacent points + out (Tensor, optional): the output tensor Example:: @@ -3757,7 +3773,7 @@ add_docstr(torch._C.remainder, - """ + r""" remainder(input, divisor, out=None) -> Tensor Computes the element-wise remainder of division. @@ -3765,14 +3781,14 @@ The divisor and dividend may contain both for integer and floating point numbers. The remainder has the same sign as the divisor. -When :attr:`divisor` is a Tensor, the shapes of :attr:`input` and +When :attr:`divisor` is a tensor, the shapes of :attr:`input` and :attr:`divisor` must be :ref:`broadcastable `. Args: - input (Tensor): The dividend - divisor (Tensor or float): The divisor. This may be either a number or a - tensor of the same shape as the dividend. - out (Tensor, optional): Output tensor + input (Tensor): the dividend + divisor (Tensor or float): the divisor that may be either a number or a + Tensor of the same shape as the dividend + out (Tensor, optional): the output tensor Example:: @@ -3788,21 +3804,21 @@ """) add_docstr(torch._C.renorm, - """ + r""" renorm(input, p, dim, maxnorm, out=None) -> Tensor -Returns a Tensor where each sub-tensor of :attr:`input` along dimension +Returns a tensor where each sub-tensor of :attr:`input` along dimension :attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower than the value :attr:`maxnorm` .. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged Args: - input (Tensor): The input Tensor - p (float): The power for the norm computation - dim (int): The dimension to slice over to get the sub-tensors - maxnorm (float): The maximum norm to keep each sub-tensor under - out (Tensor, optional): Output tensor + input (Tensor): the input tensor + p (float): the power for the norm computation + dim (int): the dimension to slice over to get the sub-tensors + maxnorm (float): the maximum norm to keep each sub-tensor under + out (Tensor, optional): the output tensor Example:: @@ -3826,15 +3842,15 @@ """) add_docstr(torch._C.round, - """ + r""" round(input, out=None) -> Tensor -Returns a new `Tensor` with each of the elements of :attr:`input` rounded +Returns a new tensor with each of the elements of :attr:`input` rounded to the closest integer. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -3858,15 +3874,15 @@ """) add_docstr(torch._C.rsqrt, - """ + r""" rsqrt(input, out=None) -> Tensor -Returns a new `Tensor` with the reciprocal of the square-root of each of +Returns a new tensor with the reciprocal of the square-root of each of the elements of :attr:`input`. 
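A one-line sketch of the element-wise behaviour just described (illustrative values):

    >>> torch.rsqrt(torch.Tensor([1, 4, 16]))    # 1.0, 0.5, 0.25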
Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -3890,21 +3906,21 @@ """) add_docstr(torch._C.set_num_threads, - """ + r""" set_num_threads(int) Sets the number of OpenMP threads used for parallelizing CPU operations """) add_docstr(torch._C.sigmoid, - """ + r""" sigmoid(input, out=None) -> Tensor -Returns a new `Tensor` with the sigmoid of the elements of :attr:`input`. +Returns a new tensor with the sigmoid of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -3928,14 +3944,14 @@ """) add_docstr(torch._C.sign, - """ + r""" sign(input, out=None) -> Tensor -Returns a new `Tensor` with the sign of the elements of :attr:`input`. +Returns a new tensor with the sign of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -3953,19 +3969,18 @@ 1 1 1 - [torch.FloatTensor of size 4] """) add_docstr(torch._C.sin, - """ + r""" sin(input, out=None) -> Tensor -Returns a new `Tensor` with the sine of the elements of :attr:`input`. +Returns a new tensor with the sine of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -3986,15 +4001,15 @@ """) add_docstr(torch._C.sinh, - """ + r""" sinh(input, out=None) -> Tensor -Returns a new `Tensor` with the hyperbolic sine of the elements of +Returns a new tensor with the hyperbolic sine of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -4015,10 +4030,10 @@ """) add_docstr(torch._C.sort, - """ + r""" sort(input, dim=None, descending=False, out=None) -> (Tensor, LongTensor) -Sorts the elements of the :attr:`input` Tensor along a given dimension +Sorts the elements of the :attr:`input` tensor along a given dimension in ascending order by value. If :attr:`dim` is not given, the last dimension of the `input` is chosen. @@ -4027,15 +4042,13 @@ order by value. A tuple of (sorted_tensor, sorted_indices) is returned, where the -sorted_indices are the indices of the elements in the original `input` Tensor. +sorted_indices are the indices of the elements in the original `input` tensor. Args: - input (Tensor): the input `Tensor` - dim (int, optional): The dimension to sort along - descending (bool, optional): Controls the sorting order - (ascending or descending) - out (tuple, optional): The output tuple of (Tensor, LongTensor) - can be optionally given to be used as output buffers + input (Tensor): the input tensor + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can be optionally given to be used as output buffers Example:: @@ -4073,14 +4086,14 @@ """) add_docstr(torch._C.sqrt, - """ + r""" sqrt(input, out=None) -> Tensor -Returns a new `Tensor` with the square-root of the elements of :attr:`input`. 
+Returns a new tensor with the square-root of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -4107,27 +4120,28 @@ r""" squeeze(input, dim=None, out=None) -Returns a `Tensor` with all the dimensions of :attr:`input` of size `1` removed. +Returns a tensor with all the dimensions of :attr:`input` of size `1` removed. -If `input` is of shape: :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `out` Tensor -will be of shape: :math:`(A \times B \times C \times D)` +For example, if `input` is of shape: +:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `out` tensor +will be of shape: :math:`(A \times B \times C \times D)`. When :attr:`dim` is given, a squeeze operation is done only in the given -dimension. If `input` is of shape: :math:`(A \times 1 \times B)`, `squeeze(input, 0)` -leaves the Tensor unchanged, but `squeeze(input, 1)` will squeeze the tensor -to the shape :math:`(A \times B)`. +dimension. If `input` is of shape: :math:`(A \times 1 \times B)`, +`squeeze(input, 0)` leaves the tensor unchanged, but `squeeze(input, 1)` will +squeeze the tensor to the shape :math:`(A \times B)`. .. note:: As an exception to the above, a 1-dimensional tensor of size 1 will not have its dimensions changed. -.. note:: The returned Tensor shares the storage with the input Tensor, +.. note:: The returned tensor shares the storage with the input tensor, so changing the contents of one will change the contents of the other. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int, optional): if given, the input will be squeezed only in this dimension - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -4146,16 +4160,16 @@ """) add_docstr(torch._C.std, - """ + r""" .. function:: std(input, unbiased=True) -> float -Returns the standard-deviation of all elements in the :attr:`input` Tensor. +Returns the standard-deviation of all elements in the :attr:`input` tensor. -If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated via -the biased estimator. Otherwise, Bessel's correction will be used. +If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated +via the biased estimator. Otherwise, Bessel's correction will be used. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor unbiased (bool): whether to use the unbiased estimation or not Example:: @@ -4172,23 +4186,23 @@ .. function:: std(input, dim, keepdim=False, unbiased=True, out=None) -> Tensor -Returns the standard-deviation of each row of the :attr:`input` Tensor in the +Returns the standard-deviation of each row of the :attr:`input` tensor in the given dimension :attr:`dim`. -If :attr:`keepdim` is ``True``, the output Tensor is of the same size as +If :attr:`keepdim` is ``True``, the output tensor is of the same size as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting -in the output Tensor having 1 fewer dimension than :attr:`input`. +in the output tensor having 1 fewer dimension than :attr:`input`. -If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated via -the biased estimator. Otherwise, Bessel's correction will be used. 
+If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated +via the biased estimator. Otherwise, Bessel's correction will be used. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int): the dimension to reduce - keepdim (bool): whether the output Tensor has :attr:`dim` retained or not + keepdim (bool): whether the output tensor has :attr:`dim` retained or not unbiased (bool): whether to use the unbiased estimation or not - out (Tensor, optional): the result Tensor + out (Tensor, optional): the output tensor Example:: @@ -4212,13 +4226,13 @@ """) add_docstr(torch._C.sum, - """ + r""" .. function:: sum(input) -> float -Returns the sum of all elements in the :attr:`input` Tensor. +Returns the sum of all elements in the :attr:`input` tensor. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor Example:: @@ -4234,19 +4248,19 @@ .. function:: sum(input, dim, keepdim=False, out=None) -> Tensor -Returns the sum of each row of the :attr:`input` Tensor in the given +Returns the sum of each row of the :attr:`input` tensor in the given dimension :attr:`dim`. -If :attr:`keepdim` is ``True``, the output Tensor is of the same size +If :attr:`keepdim` is ``True``, the output tensor is of the same size as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in -the output Tensor having 1 fewer dimension than :attr:`input`. +the output tensor having 1 fewer dimension than :attr:`input`. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int): the dimension to reduce - keepdim (bool): whether the output Tensor has :attr:`dim` retained or not - out (Tensor, optional): the result Tensor + keepdim (bool): whether the output tensor has :attr:`dim` retained or not + out (Tensor, optional): the output tensor Example:: @@ -4270,18 +4284,19 @@ """) add_docstr(torch._C.svd, - """ + r""" svd(input, some=True, out=None) -> (Tensor, Tensor, Tensor) `U, S, V = torch.svd(A)` returns the singular value decomposition of a -real matrix `A` of size `(n x m)` such that :math:`A = USV'*`. +real matrix `A` of size `(n x m)` such that :math:`A = USV^T`. -`U` is of shape `n x min(n, m)` +`U` is of shape :math:`(n \times \min(n, m))`. -`S` is a diagonal square matrix of shape `min(n, m) x min(n, m)`, represented as -a vector of shape `(min(n, m),)` containing its diagonal entries. +`S` is a diagonal matrix of shape :math:`(\min(n, m) \times \min(n, m))`, +represented as a vector of size :math:`\min(n, m)` containing the diagonal +entries. -`V` is of shape `m x min(n, m)`. +`V` is of shape :math:`(m \times \min(n, m))`. :attr:`some` represents the number of singular values to be computed. If `some=True`, it computes some and `some=False` computes all. @@ -4300,9 +4315,9 @@ .. note:: Double backward through :meth:`~torch.svd` is not supported currently. Args: - input (Tensor): the input 2D Tensor + input (Tensor): the input 2-D tensor some (bool, optional): controls the number of singular values to be computed - out (tuple, optional): the result tuple + out (tuple, optional): the output tuple of tensors Example:: @@ -4356,16 +4371,17 @@ """) add_docstr(torch._C.symeig, - """ + r""" symeig(input, eigenvectors=False, upper=True, out=None) -> (Tensor, Tensor) `e, V = torch.symeig(input)` returns eigenvalues and eigenvectors -of a symmetric real matrix :attr:`input`. +of a real symmetric matrix :attr:`input`. 
-`input` and `V` are `m x m` matrices and `e` is a `m` dimensional vector. +`input` and `V` are :math:`(m \times m)` matrices and `e` is a `m` dimensional +vector. This function calculates all eigenvalues (and vectors) of `input` -such that `input = V diag(e) V'` +such that :math:`input = V diag(e) V^T`. The boolean argument :attr:`eigenvectors` defines computation of eigenvectors or eigenvalues only. @@ -4383,11 +4399,9 @@ Args: input (Tensor): the input symmetric matrix - eigenvectors(boolean, optional): controls whether eigenvectors have - to be computed - upper(boolean, optional): controls whether to consider upper-triangular or - lower-triangular region - out (tuple, optional): The result tuple of (Tensor, Tensor) + eigenvectors(boolean, optional): controls whether eigenvectors have to be computed + upper(boolean, optional): controls whether to consider upper-triangular or lower-triangular region + out (tuple, optional): the output tuple of (Tensor, Tensor) Examples:: @@ -4420,17 +4434,17 @@ """) add_docstr(torch._C.t, - """ + r""" t(input, out=None) -> Tensor -Expects :attr:`input` to be a matrix (2D Tensor) and transposes -dimensions 0 and 1. +Expects :attr:`input` to be a matrix (2-D tensor) and transposes dimensions 0 and +1. Can be seen as a short-hand function for `transpose(input, 0, 1)` Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -4450,16 +4464,16 @@ """) -add_docstr(torch._C.take, """\ +add_docstr(torch._C.take, r"""\ take(input, indices) -> Tensor -Returns a new `Tensor` with the elements of :attr:`input` at the given indices. -The input tensor is treated as if it were viewed as a 1D tensor. The result +Returns a new tensor with the elements of :attr:`input` at the given indices. +The input tensor is treated as if it were viewed as a 1-D tensor. The result takes the same shape as the indices. Args: - input (Tensor): the input `Tensor` - indices (LongTensor): the indices into `Tensor` + input (Tensor): the input tensor + indices (LongTensor): the indices into tensor Example:: @@ -4473,14 +4487,14 @@ """) add_docstr(torch._C.tan, - """ + r""" tan(input, out=None) -> Tensor -Returns a new `Tensor` with the tangent of the elements of :attr:`input`. +Returns a new tensor with the tangent of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -4501,15 +4515,15 @@ """) add_docstr(torch._C.tanh, - """ + r""" tanh(input, out=None) -> Tensor -Returns a new `Tensor` with the hyperbolic tangent of the elements +Returns a new tensor with the hyperbolic tangent of the elements of :attr:`input`. Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -4530,10 +4544,10 @@ """) add_docstr(torch._C.topk, - """ + r""" topk(input, k, dim=None, largest=True, sorted=True, out=None) -> (Tensor, LongTensor) -Returns the :attr:`k` largest elements of the given :attr:`input` Tensor along +Returns the :attr:`k` largest elements of the given :attr:`input` tensor along a given dimension. If :attr:`dim` is not given, the last dimension of the `input` is chosen. @@ -4541,21 +4555,20 @@ If :attr:`largest` is ``False`` then the `k` smallest elements are returned. 
A tuple of `(values, indices)` is returned, where the `indices` are the indices -of the elements in the original `input` Tensor. +of the elements in the original `input` tensor. The boolean option :attr:`sorted` if ``True``, will make sure that the returned `k` elements are themselves sorted Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor k (int): the k in "top-k" - dim (int, optional): The dimension to sort along - largest (bool, optional): Controls whether to return largest or + dim (int, optional): the dimension to sort along + largest (bool, optional): controls whether to return largest or smallest elements - sorted (bool, optional): Controls whether to return the elements + sorted (bool, optional): controls whether to return the elements in sorted order - out (tuple, optional): The output tuple of (Tensor, LongTensor) - can be optionally given to be used as output buffers + out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be optionally given to be used as output buffers Example:: @@ -4597,10 +4610,10 @@ """) add_docstr(torch._C.trace, - """ + r""" trace(input) -> float -Returns the sum of the elements of the diagonal of the input 2D matrix. +Returns the sum of the elements of the diagonal of the input 2-D matrix. Example:: @@ -4618,20 +4631,20 @@ """) add_docstr(torch._C.transpose, - """ + r""" transpose(input, dim0, dim1, out=None) -> Tensor -Returns a `Tensor` that is a transposed version of :attr:`input`. +Returns a tensor that is a transposed version of :attr:`input`. The given dimensions :attr:`dim0` and :attr:`dim1` are swapped. -The resulting :attr:`out` Tensor shares it's underlying storage with the -:attr:`input` Tensor, so changing the content of one would change the content +The resulting :attr:`out` tensor shares it's underlying storage with the +:attr:`input` tensor, so changing the content of one would change the content of the other. Args: - input (Tensor): the input `Tensor` - dim0 (int): The first dimension to be transposed - dim1 (int): The second dimension to be transposed + input (Tensor): the input tensor + dim0 (int): the first dimension to be transposed + dim1 (int): the second dimension to be transposed Example:: @@ -4652,25 +4665,25 @@ """) add_docstr(torch._C.tril, - """ + r""" tril(input, diagonal=0, out=None) -> Tensor -Returns the lower triangular part of the matrix (2D Tensor) :attr:`input`, -the other elements of the result Tensor :attr:`out` are set to 0. +Returns the lower triangular part of the matrix (2-D tensor) :attr:`input`, +the other elements of the result tensor :attr:`out` are set to 0. The lower triangular part of the matrix is defined as the elements on and below the diagonal. -The argument :attr:`diagonal` controls which diagonal to consider. +The argument :attr:`diagonal` controls which diagonal to consider: -- :attr:`diagonal` = 0, is the main diagonal. -- :attr:`diagonal` > 0, is above the main diagonal. -- :attr:`diagonal` < 0, is below the main diagonal. +- If :attr:`diagonal` = 0, it is the main diagonal. +- If :attr:`diagonal` > 0, it is above the main diagonal. +- If :attr:`diagonal` < 0, it is below the main diagonal. 
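To illustrate the :attr:`diagonal` values listed above (a sketch using the positional form of the argument):

    >>> a = torch.ones(3, 3)
    >>> torch.tril(a)        # main diagonal and below are kept
    >>> torch.tril(a, 1)     # also keeps the first diagonal above the main one
    >>> torch.tril(a, -1)    # keeps only elements strictly below the main diagonal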
Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor diagonal (int, optional): the diagonal to consider - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -4706,25 +4719,25 @@ """) add_docstr(torch._C.triu, - """ + r""" triu(input, diagonal=0, out=None) -> Tensor -Returns the upper triangular part of the matrix (2D Tensor) :attr:`input`, -the other elements of the result Tensor :attr:`out` are set to 0. +Returns the upper triangular part of the matrix (2-D tensor) :attr:`input`, +the other elements of the result tensor :attr:`out` are set to 0. The upper triangular part of the matrix is defined as the elements on and above the diagonal. -The argument :attr:`diagonal` controls which diagonal to consider. +The argument :attr:`diagonal` controls which diagonal to consider: -- :attr:`diagonal` = 0, is the main diagonal. -- :attr:`diagonal` > 0, is above the main diagonal. -- :attr:`diagonal` < 0, is below the main diagonal. +- If :attr:`diagonal` = 0, it is the main diagonal. +- If :attr:`diagonal` > 0, it is above the main diagonal. +- If :attr:`diagonal` < 0, it is below the main diagonal. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor diagonal (int, optional): the diagonal to consider - out (Tensor, optional): The result `Tensor` + out (Tensor, optional): the output tensor Example:: @@ -4760,7 +4773,7 @@ """) add_docstr(torch._C.trtrs, - """ + r""" trtrs(b, A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor) Solves a system of equations with a triangular coefficient matrix `A` @@ -4772,14 +4785,14 @@ This method is NOT implemented for CUDA tensors. Args: - A (Tensor): the input triangular coefficient matrix. + A (Tensor): the input triangular coefficient matrix b (Tensor): multiple right-hand sides. Each column of `b` is a right-hand side for the system of equations. - upper (bool, optional): Solves the upper-triangular system - of equations if True, lower-triangular if False. Default: True. - transpose (bool, optional): If `A` should be transposed before + upper (bool, optional): whether to solve the upper-triangular system + of equations (default) or the lower-triangular system of equations. Default: True. + transpose (bool, optional): whether `A` should be transposed before being sent into the solver. Default: False. - unitriangular (bool, optional): If `A` is unit triangular. + unitriangular (bool, optional): whether `A` is unit triangular. If True, the diagonal elements of `A` are assumed to be 1 and not referenced from `A`. Default: False. @@ -4823,15 +4836,15 @@ """) add_docstr(torch._C.trunc, - """ + r""" trunc(input, out=None) -> Tensor -Returns a new `Tensor` with the truncated integer values of +Returns a new tensor with the truncated integer values of the elements of :attr:`input`. 
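A small sketch of truncation toward zero as described above (illustrative values):

    >>> torch.trunc(torch.Tensor([3.7, -2.1, 0.5]))    # 3.0, -2.0, 0.0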
Args: - input (Tensor): the input `Tensor` - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + out (Tensor, optional): the output tensor Example:: @@ -4855,7 +4868,7 @@ """) add_docstr(torch._C.unsqueeze, - """ + r""" unsqueeze(input, dim, out=None) Returns a new tensor with a dimension of size one inserted at the @@ -4867,9 +4880,9 @@ :math:`dim + input.dim() + 1` Args: - input (Tensor): the input `Tensor` - dim (int): The index at which to insert the singleton dimension - out (Tensor, optional): The result `Tensor` + input (Tensor): the input tensor + dim (int): the index at which to insert the singleton dimension + out (Tensor, optional): the output tensor Example: >>> x = torch.Tensor([1, 2, 3, 4]) @@ -4885,16 +4898,16 @@ """) add_docstr(torch._C.var, - """ + r""" .. function:: var(input, unbiased=True) -> float -Returns the variance of all elements in the :attr:`input` Tensor. +Returns the variance of all elements in the :attr:`input` tensor. If :attr:`unbiased` is ``False``, then the variance will be calculated via the biased estimator. Otherwise, Bessel's correction will be used. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor unbiased (bool): whether to use the unbiased estimation or not Example:: @@ -4911,23 +4924,23 @@ .. function:: var(input, dim, keepdim=False, unbiased=True, out=None) -> Tensor -Returns the variance of each row of the :attr:`input` Tensor in the given +Returns the variance of each row of the :attr:`input` tensor in the given dimension :attr:`dim`. -If :attr:`keepdim` is ``True``, the output Tensors are of the same size +If :attr:`keepdim` is ``True``, the output tensors are of the same size as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in -the outputs Tensor having 1 fewer dimension than :attr:`input`. +the outputs tensor having 1 fewer dimension than :attr:`input`. If :attr:`unbiased` is ``False``, then the variance will be calculated via the biased estimator. Otherwise, Bessel's correction will be used. Args: - input (Tensor): the input `Tensor` + input (Tensor): the input tensor dim (int): the dimension to reduce - keepdim (bool): whether the output Tensor has :attr:`dim` retained or not + keepdim (bool): whether the output tensor has :attr:`dim` retained or not unbiased (bool): whether to use the unbiased estimation or not - out (Tensor, optional): the result Tensor + out (Tensor, optional): the output tensor Example:: @@ -4951,15 +4964,15 @@ """) add_docstr(torch._C.zeros, - """ + r""" zeros(*sizes, out=None) -> Tensor -Returns a Tensor filled with the scalar value `0`, with the shape defined +Returns a tensor filled with the scalar value `0`, with the shape defined by the varargs :attr:`sizes`. Args: - sizes (int...): a set of ints defining the shape of the output Tensor. - out (Tensor, optional): the result Tensor + sizes (int...): a set of integers defining the shape of the output tensor + out (Tensor, optional): the output tensor Example:: @@ -4981,14 +4994,15 @@ """) add_docstr(torch._C.zeros_like, - """ + r""" zeros_like(input, out=None) -> Tensor -Returns a Tensor filled with the scalar value `0`, with the same size as :attr:`input`. +Returns a tensor filled with the scalar value `0`, with the same size as +:attr:`input`. Args: - input (Tensor): The size of the input will determine the size of the output. 
- out (Tensor, optional): the result Tensor + input (Tensor): the size of the input will determine the size of the output. + out (Tensor, optional): the output tensor Example:: @@ -5001,7 +5015,7 @@ """) add_docstr(torch._C.btrifact, - """ + r""" btrifact(A, info=None, pivot=True) -> Tensor, IntTensor Batch LU factorization. @@ -5014,7 +5028,7 @@ LAPACK. Pivoting is done if pivot is set. Arguments: - A (Tensor): tensor to factor. + A (Tensor): the tensor to factor Example:: @@ -5025,17 +5039,17 @@ add_docstr(torch._C.btrisolve, - """ + r""" btrisolve(b, LU_data, LU_pivots) -> Tensor Batch LU solve. -Returns the LU solve of the linear system Ax = b. +Returns the LU solve of the linear system :math:`Ax = b`. Arguments: - b (Tensor): RHS tensor. - LU_data (Tensor): Pivoted LU factorization of A from btrifact. - LU_pivots (IntTensor): Pivots of the LU factorization. + b (Tensor): the RHS tensor + LU_data (Tensor): the pivoted LU factorization of A from :meth:`btrifact`. + LU_pivots (IntTensor): the pivots of the LU factorization Example:: diff --git a/torch/functional.py b/torch/functional.py index 2f91deb82736..b1cbc27290b3 100644 --- a/torch/functional.py +++ b/torch/functional.py @@ -9,15 +9,15 @@ def split(tensor, split_size, dim=0): - """Splits the tensor into equally sized chunks (if possible). + """Splits the tensor into chunks all of size :attr:`split_size` (if possible). Last chunk will be smaller if the tensor size along a given dimension - is not divisible by ``split_size``. + is not divisible by :attr`split_size`. Arguments: - tensor (Tensor): tensor to split. - split_size (int): size of a single chunk. - dim (int): dimension along which to split the tensor. + tensor (Tensor): the tensor to split + split_size (int): size of a single chunk + dim (int): dimension along which to split the tensor """ if dim < 0: dim += tensor.dim() @@ -32,12 +32,12 @@ def get_split_size(i): def chunk(tensor, chunks, dim=0): - """Splits a tensor into a number of chunks along a given dimension. + """Splits a tensor into a specific number of chunks. Arguments: - tensor (Tensor): tensor to split. - chunks (int): number of chunks to return. - dim (int): dimension along which to split the tensor. + tensor (Tensor): the tensor to split + chunks (int): number of chunks to return + dim (int): dimension along which to split the tensor """ if dim < 0: dim += tensor.dim() @@ -51,9 +51,9 @@ def stack(sequence, dim=0, out=None): All tensors need to be of the same size. Arguments: - sequence (Sequence): sequence of tensors to concatenate. + sequence (Sequence): sequence of tensors to concatenate dim (int): dimension to insert. Has to be between 0 and the number - of dimensions of concatenated tensors (inclusive). + of dimensions of concatenated tensors (inclusive) """ if len(sequence) == 0: raise ValueError("stack expects a non-empty sequence of tensors") @@ -72,8 +72,8 @@ def unbind(tensor, dim=0): Returns a tuple of all slices along a given dimension, already without it. Arguments: - tensor (Tensor): tensor to unbind. - dim (int): dimension to remove. + tensor (Tensor): the tensor to unbind + dim (int): dimension to remove """ return tuple(tensor.select(dim, i) for i in _range(tensor.size(dim))) @@ -87,10 +87,10 @@ def btriunpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True): 2: The U tensor. Arguments: - LU_data (Tensor): The packed LU factorization data. - LU_pivots (Tensor): The packed LU factorization pivots. - unpack_data (bool): Flag indicating if the data should be unpacked. 
-        unpack_pivots (bool): Flag indicating if the pivots should be unpacked.
+        LU_data (Tensor): the packed LU factorization data
+        LU_pivots (Tensor): the packed LU factorization pivots
+        unpack_data (bool): flag indicating if the data should be unpacked
+        unpack_pivots (bool): flag indicating if the pivots should be unpacked
     """

     nBatch, sz, _ = LU_data.size()
@@ -122,7 +122,7 @@ def btriunpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True):

 def matmul(tensor1, tensor2, out=None):
-    """Matrix product of two tensors.
+    r"""Matrix product of two tensors.

     The behavior depends on the dimensionality of the tensors as follows:

@@ -139,17 +139,18 @@ def matmul(tensor1, tensor2, out=None):
       batched matrix multiply and removed after. If the second argument is 1-dimensional, a
       1 is appended to its dimension for the purpose of the batched matrix multiply and
       removed after. The non-matrix (i.e. batch) dimensions are :ref:`broadcasted ` (and thus
-      must be broadcastable). For example, if :attr:`tensor1` is a `j x 1 x n x m` Tensor
-      and :attr:`tensor2` is a `k x m x p` Tensor, :attr:`out` will be an `j x k x n x p` Tensor.
+      must be broadcastable). For example, if :attr:`tensor1` is a
+      :math:`(j \times 1 \times n \times m)` tensor and :attr:`tensor2` is a :math:`(k \times m \times p)`
+      tensor, :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.

     .. note:: The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.

     Arguments:
-        tensor1 (Tensor): First tensor to be multiplied
-        tensor2 (Tensor): Second tensor to be multiplied
-        out (Tensor, optional): Output tensor
+        tensor1 (Tensor): the first tensor to be multiplied
+        tensor2 (Tensor): the second tensor to be multiplied
+        out (Tensor, optional): the output tensor
     """
     dim_tensor1 = tensor1.dim()
     dim_tensor2 = tensor2.dim()
diff --git a/torch/tensor.py b/torch/tensor.py
index ef5114469000..e51472bc93a4 100644
--- a/torch/tensor.py
+++ b/torch/tensor.py
@@ -14,11 +14,12 @@ class _TensorBase(object):
     # CUDA case, which handles constructing the tensor on the same GPU
     # as this tensor.
     def new(self, *args, **kwargs):
-        """Constructs a new tensor of the same data type.
+        r"""Constructs a new tensor of the same data type as :attr:`self` tensor.

-        Any valid argument combination to the Tensor constructor is accepted by
-        this method, including sizes, :class:`torch.Storage`, numpy ndarray,
-        Python Sequence, etc. See :class:`torch.Tensor` for more details.
+        Any valid argument combination to the tensor constructor is accepted by
+        this method, including sizes, :class:`torch.Storage`, NumPy ndarray,
+        Python Sequence, etc. See :ref:`torch.Tensor ` for more
+        details.

         .. note:: For CUDA tensors, this method will create new tensor on the same
                   device as this tensor.
@@ -26,61 +27,62 @@ def new(self, *args, **kwargs):
         return self.__class__(*args, **kwargs)

     def type_as(self, tensor):
-        """Returns this tensor cast to the type of the given tensor.
+        r"""Returns this :attr:`self` tensor cast to the type of the given
+        tensor.

-        This is a no-op if the tensor is already of the correct type. This is
-        equivalent to::
+        This is a no-op if the :attr:`self` tensor is already of the correct
+        type.
This is equivalent to:: self.type(tensor.type()) Params: - tensor (Tensor): the tensor which has the desired type + tensor (Tensor): the tensor with the desired type """ return self.type(tensor.type()) def cpu(self): - """Returns a CPU copy of this tensor if it's not already on the CPU""" + r"""Returns a CPU copy of this tensor if it's not already on the CPU""" return self.type(getattr(torch, self.__class__.__name__)) def double(self): - """Casts this tensor to double type""" + r"""Casts this tensor to double type""" return self.type(type(self).__module__ + '.DoubleTensor') def float(self): - """Casts this tensor to float type""" + r"""Casts this tensor to float type""" return self.type(type(self).__module__ + '.FloatTensor') def half(self): - """Casts this tensor to half-precision float type""" + r"""Casts this tensor to half-precision float type""" return self.type(type(self).__module__ + '.HalfTensor') def long(self): - """Casts this tensor to long type""" + r"""Casts this tensor to long type""" return self.type(type(self).__module__ + '.LongTensor') def int(self): - """Casts this tensor to int type""" + r"""Casts this tensor to int type""" return self.type(type(self).__module__ + '.IntTensor') def short(self): - """Casts this tensor to short type""" + r"""Casts this tensor to short type""" return self.type(type(self).__module__ + '.ShortTensor') def char(self): - """Casts this tensor to char type""" + r"""Casts this tensor to char type""" return self.type(type(self).__module__ + '.CharTensor') def byte(self): - """Casts this tensor to byte type""" + r"""Casts this tensor to byte type""" return self.type(type(self).__module__ + '.ByteTensor') def is_pinned(self): - """Returns true if this tensor resides in pinned memory""" + r"""Returns true if this tensor resides in pinned memory""" storage = self.storage() return storage.is_pinned() if storage else False def pin_memory(self): - """Copies the tensor to pinned memory, if it's not already pinned.""" + r"""Copies the tensor to pinned memory, if it's not already pinned.""" if self.is_cuda: raise TypeError("cannot pin '{0}' only CPU memory can be pinned" .format(self.type())) @@ -90,7 +92,7 @@ def pin_memory(self): return type(self)().set_(storage.pin_memory()).view_as(self) def share_memory_(self): - """Moves the underlying storage to shared memory. + r"""Moves the underlying storage to shared memory. This is a no-op if the underlying storage is already in shared memory and for CUDA tensors. Tensors in shared memory cannot be resized. @@ -99,7 +101,7 @@ def share_memory_(self): return self def is_shared(self): - """Checks if tensor is in shared memory. + r"""Checks if tensor is in shared memory. This is always ``True`` for CUDA tensors. """ @@ -107,9 +109,10 @@ def is_shared(self): @property def shape(self): - """Alias for .size() + r"""Alias for .size() - Returns a torch.Size object, containing the dimensions of the tensor + Returns a torch.Size object, containing the dimensions of the + :attr:`self` Tensor. """ return self.size() @@ -170,27 +173,27 @@ def __iter__(self): return iter([]) def split(self, split_size, dim=0): - """Splits this tensor into a tuple of tensors. + r"""Splits this tensor into tensor chunks of :attr:`split_size` size. See :func:`torch.split`. """ return torch.split(self, split_size, dim) def chunk(self, n_chunks, dim=0): - """Splits this tensor into a tuple of tensors. + r"""Splits this tensor into a certain number of tensor chunks. See :func:`torch.chunk`. 
""" return torch.chunk(self, n_chunks, dim) def matmul(self, other): - """Matrix product of two tensors. + r"""Matrix product of two tensors. See :func:`torch.matmul`.""" return torch.matmul(self, other) def tolist(self): - """Returns a nested list represenation of this tensor.""" + r"""Returns a nested list represenation of this tensor.""" dim = self.dim() if dim == 1: return [v for v in self] @@ -199,7 +202,7 @@ def tolist(self): return [] def view_as(self, tensor): - """Returns this tensor viewed as the size as the specified tensor. + r"""Returns this tensor viewed as the size as the specified tensor. This is equivalent to:: @@ -208,7 +211,7 @@ def view_as(self, tensor): return self.view(tensor.size()) def permute(self, *dims): - """Permute the dimensions of this tensor. + r"""Permute the dimensions of this tensor. Args: *dims (int...): The desired ordering of dimensions @@ -237,7 +240,7 @@ def permute(self, *dims): return tensor def expand_as(self, tensor): - """Expands this tensor to the size of the specified tensor. + r"""Expands this tensor to the size of the specified tensor. This is equivalent to:: @@ -246,7 +249,7 @@ def expand_as(self, tensor): return self.expand(tensor.size()) def repeat(self, *sizes): - """Repeats this tensor along the specified dimensions. + r"""Repeats this tensor along the specified dimensions. Unlike :meth:`expand`, this function copies the tensor's data.