Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions aten/src/ATen/core/aten_interned_strings.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,6 @@ _(aten, _abs) \
_(aten, _addmv) \
_(aten, _addr) \
_(aten, _arange) \
_(aten, _asinh) \
_(aten, _atanh) \
_(aten, _argmax) \
_(aten, _argmin) \
_(aten, _baddbmm_mkl) \
Expand Down
8 changes: 8 additions & 0 deletions aten/src/ATen/core/interned_strings.h
Original file line number Diff line number Diff line change
Expand Up @@ -157,10 +157,18 @@ namespace c10 {
_(aten, asin_) \
_(aten, arcsin) \
_(aten, arcsin_) \
_(aten, asinh) \
_(aten, asinh_) \
_(aten, arcsinh) \
_(aten, arcsinh_) \
_(aten, atan) \
_(aten, atan_) \
_(aten, arctan) \
_(aten, arctan_) \
_(aten, atanh) \
_(aten, atanh_) \
_(aten, arctanh) \
_(aten, arctanh_) \
_(aten, clamp) \
_(aten, clamp_) \
_(aten, clip) \
Expand Down
28 changes: 19 additions & 9 deletions aten/src/ATen/native/UnaryOps.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -108,9 +108,9 @@ Tensor acos(const Tensor& self) { return unary_op_impl(self, at::acos_out); }
Tensor& acos_(Tensor& self) { return unary_op_impl_(self, at::acos_out); }

// arccos, alias for acos
// Delegate to the acos entry points (rather than calling acos_stub directly)
// so the alias shares acos's dispatch and error-checking behavior.
Tensor& arccos_out(Tensor& result, const Tensor& self) { return at::acos_out(result, self); }
Tensor arccos(const Tensor& self) { return self.acos(); }
Tensor& arccos_(Tensor& self) { return self.acos_(); }

static Tensor wrapped_scalar_tensor(Scalar scalar) {
auto tensor = scalar_to_tensor(scalar);
Expand Down Expand Up @@ -140,18 +140,18 @@ Tensor asin(const Tensor& self) { return unary_op_impl(self, at::asin_out); }
Tensor& asin_(Tensor& self) { return unary_op_impl_(self, at::asin_out); }

// arcsin, alias of asin
// Delegate to the asin entry points (rather than calling asin_stub directly)
// so the alias shares asin's dispatch and error-checking behavior.
Tensor& arcsin_out(Tensor& result, const Tensor& self) { return at::asin_out(result, self); }
Tensor arcsin(const Tensor& self) { return self.asin(); }
Tensor& arcsin_(Tensor& self) { return self.asin_(); }

// atan: the out-variant invokes atan_stub; the functional and in-place
// variants route through at::atan_out so all three share one kernel.
Tensor& atan_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, atan_stub); }
Tensor atan(const Tensor& self) { return unary_op_impl(self, at::atan_out); }
Tensor& atan_(Tensor& self) { return unary_op_impl_(self, at::atan_out); }

// arctan, alias of atan
// Delegate to the atan entry points (rather than calling atan_stub directly)
// so the alias shares atan's dispatch and error-checking behavior.
Tensor& arctan_out(Tensor& result, const Tensor& self) { return at::atan_out(result, self); }
Tensor arctan(const Tensor& self) { return self.atan(); }
Tensor& arctan_(Tensor& self) { return self.atan_(); }

// Note [Complex abs and angle]
// Complex inputs to abs and angle return float results by default.
Expand Down Expand Up @@ -314,10 +314,20 @@ Tensor& asinh_out(Tensor& result, const Tensor& self) { return unary_op_impl_out
// asinh: functional and in-place variants both route through at::asinh_out.
Tensor asinh(const Tensor& self) { return unary_op_impl(self, at::asinh_out); }
Tensor& asinh_(Tensor& self) { return unary_op_impl_(self, at::asinh_out); }

// arcsinh, alias for asinh
// Delegates to the asinh entry points so the alias shares asinh's dispatch.
Tensor& arcsinh_out(Tensor& result, const Tensor& self) { return at::asinh_out(result, self); }
Tensor arcsinh(const Tensor& self) { return self.asinh(); }
Tensor& arcsinh_(Tensor& self) { return self.asinh_(); }

// atanh: the out-variant invokes atanh_stub; the functional and in-place
// variants route through at::atanh_out so all three share one kernel.
Tensor& atanh_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, atanh_stub); }
Tensor atanh(const Tensor& self) { return unary_op_impl(self, at::atanh_out); }
Tensor& atanh_(Tensor& self) { return unary_op_impl_(self, at::atanh_out); }

// arctanh, alias for atanh
// Delegates to the atanh entry points so the alias shares atanh's dispatch.
Tensor& arctanh_out(Tensor& result, const Tensor& self) { return at::atanh_out(result, self); }
Tensor arctanh(const Tensor& self) { return self.atanh(); }
Tensor& arctanh_(Tensor& self) { return self.atanh_(); }

// sqrt: the out-variant invokes sqrt_stub; the functional and in-place
// variants route through at::sqrt_out so all three share one kernel.
Tensor& sqrt_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, sqrt_stub); }
Tensor sqrt(const Tensor& self) { return unary_op_impl(self, at::sqrt_out); }
Tensor& sqrt_(Tensor& self) { return unary_op_impl_(self, at::sqrt_out); }
Expand Down
22 changes: 22 additions & 0 deletions aten/src/ATen/native/native_functions.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -495,6 +495,17 @@

- func: asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)

# arcsinh, alias for asinh
- func: arcsinh(Tensor self) -> Tensor
use_c10_dispatcher: full
variants: function, method

- func: arcsinh_(Tensor(a!) self) -> Tensor(a!)
use_c10_dispatcher: full
variants: function, method

- func: arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)

- func: atanh(Tensor self) -> Tensor
use_c10_dispatcher: full
variants: function, method
Expand All @@ -505,6 +516,17 @@

- func: atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)

# arctanh, alias for atanh
- func: arctanh(Tensor self) -> Tensor
use_c10_dispatcher: full
variants: function, method

- func: arctanh_(Tensor(a!) self) -> Tensor(a!)
use_c10_dispatcher: full
variants: function, method

- func: arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)

- func: as_strided(Tensor(a) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a)
use_c10_dispatcher: full
variants: function, method
Expand Down
4 changes: 4 additions & 0 deletions docs/source/tensors.rst
Original file line number Diff line number Diff line change
Expand Up @@ -518,6 +518,8 @@ view of a storage and defines numeric operations on it.
.. automethod:: sinh_
.. automethod:: asinh
.. automethod:: asinh_
.. automethod:: arcsinh
.. automethod:: arcsinh_
.. automethod:: size
.. automethod:: slogdet
.. automethod:: solve
Expand Down Expand Up @@ -554,6 +556,8 @@ view of a storage and defines numeric operations on it.
.. automethod:: tanh_
.. automethod:: atanh
.. automethod:: atanh_
.. automethod:: arctanh
.. automethod:: arctanh_
.. automethod:: tolist
.. automethod:: topk
.. automethod:: to_sparse
Expand Down
2 changes: 2 additions & 0 deletions docs/source/torch.rst
Original file line number Diff line number Diff line change
Expand Up @@ -261,9 +261,11 @@ Pointwise Ops
asin
arcsin
asinh
arcsinh
atan
arctan
atanh
arctanh
atan2
bitwise_not
bitwise_and
Expand Down
8 changes: 8 additions & 0 deletions test/test_op_aliases.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,14 @@ def __init__(self,
lambda d: 10 * torch.randn(20, device=d)),
AliasInfo('negative_', torch.Tensor.negative_, 'neg_', torch.Tensor.neg_,
lambda d: 10 * torch.randn(20, device=d)),
AliasInfo('arcsinh', torch.arcsinh, 'asinh', torch.asinh,
lambda d: torch.randn(20, device=d)),
AliasInfo('arcsinh_', torch.Tensor.arcsinh_, 'asinh_', torch.Tensor.asinh_,
lambda d: torch.randn(20, device=d)),
AliasInfo('arctanh', torch.arctanh, 'atanh', torch.atanh,
lambda d: torch.clamp(torch.randn(20, device=d), -1, 1)),
AliasInfo('arctanh_', torch.Tensor.arctanh_, 'atanh_', torch.Tensor.atanh_,
lambda d: torch.clamp(torch.randn(20, device=d), -1, 1)),
)

# Placeholder test class for validating that aliases are correctly
Expand Down
38 changes: 30 additions & 8 deletions torch/_tensor_docs.py
Original file line number Diff line number Diff line change
Expand Up @@ -544,6 +544,18 @@ def add_docstr_all(method, docstr):
In-place version of :meth:`~Tensor.asinh`
""")

# Tensor.arcsinh is an alias; point readers at the functional torch.arcsinh docs.
add_docstr_all('arcsinh', r"""
arcsinh() -> Tensor

See :func:`torch.arcsinh`
""")

# In-place variant of the arcsinh alias.
add_docstr_all('arcsinh_', r"""
arcsinh_() -> Tensor

In-place version of :meth:`~Tensor.arcsinh`
""")

add_docstr_all('as_strided', r"""
as_strided(size, stride, storage_offset=0) -> Tensor

Expand Down Expand Up @@ -574,32 +586,42 @@ def add_docstr_all(method, docstr):
In-place version of :meth:`~Tensor.arctan`
""")

# Tensor.atan2 is binary (takes `other`); full docs live on torch.atan2.
add_docstr_all('atan2', r"""
atan2(other) -> Tensor

See :func:`torch.atan2`
""")

# In-place variant of atan2; keeps the binary `other` argument.
add_docstr_all('atan2_', r"""
atan2_(other) -> Tensor

In-place version of :meth:`~Tensor.atan2`
""")

# Tensor.atanh: unary; full docs live on the functional torch.atanh.
add_docstr_all('atanh', r"""
atanh() -> Tensor

See :func:`torch.atanh`
""")

# atanh_ is unary (in-place on self); the previous signature wrongly
# showed an `other` argument — see atanh_(Tensor(a!) self) in native_functions.yaml.
add_docstr_all('atanh_', r"""
atanh_() -> Tensor

In-place version of :meth:`~Tensor.atanh`
""")

# Tensor.arctanh is an alias; point readers at the functional torch.arctanh docs.
add_docstr_all('arctanh', r"""
arctanh() -> Tensor

See :func:`torch.arctanh`
""")

# arctanh_ is unary (in-place on self); the previous signature wrongly
# showed an `other` argument — see arctanh_(Tensor(a!) self) in native_functions.yaml.
add_docstr_all('arctanh_', r"""
arctanh_() -> Tensor

In-place version of :meth:`~Tensor.arctanh`
""")

add_docstr_all('baddbmm',
r"""
baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
Expand Down
15 changes: 13 additions & 2 deletions torch/_torch_docs.py
Original file line number Diff line number Diff line change
Expand Up @@ -699,6 +699,12 @@ def merge_dicts(*dicts):
tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
""".format(**common_args))

# torch.arcsinh is a pure alias; the full documentation lives on torch.asinh.
add_docstr(torch.arcsinh, r"""
arcsinh(input, *, out=None) -> Tensor

Alias for :func:`torch.asinh`.
""")

add_docstr(torch.atan, r"""
atan(input, *, out=None) -> Tensor

Expand Down Expand Up @@ -758,8 +764,7 @@ def merge_dicts(*dicts):
tensor([ 0.9833, 0.0811, -1.9743, -1.4151])
""".format(**common_args))

add_docstr(torch.atanh,
r"""
add_docstr(torch.atanh, r"""
atanh(input, *, out=None) -> Tensor

Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.
Expand Down Expand Up @@ -787,6 +792,12 @@ def merge_dicts(*dicts):
tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
""".format(**common_args))

# torch.arctanh is a pure alias; the full documentation lives on torch.atanh.
add_docstr(torch.arctanh, r"""
arctanh(input, *, out=None) -> Tensor

Alias for :func:`torch.atanh`.
""")

add_docstr(torch.baddbmm,
r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Expand Down
29 changes: 11 additions & 18 deletions torch/csrc/jit/passes/normalize_ops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,24 +8,17 @@ namespace {

// map from op alias -> normalized op
static const std::unordered_map<Symbol, Symbol> alias_map = {
{aten::absolute, aten::abs},
{aten::absolute_, aten::abs_},
{aten::clip, aten::clamp},
{aten::clip_, aten::clamp_},
{aten::linalg_det, aten::det},
{aten::outer, aten::ger},
{aten::arccosh, aten::acosh},
{aten::arccosh_, aten::acosh_},
{aten::arccos, aten::acos},
{aten::arccos_, aten::acos_},
{aten::arcsin, aten::asin},
{aten::arcsin_, aten::asin_},
{aten::arctan, aten::atan},
{aten::arctan_, aten::atan_},
{aten::fix, aten::trunc},
{aten::fix_, aten::trunc_},
{aten::negative, aten::neg},
{aten::negative_, aten::neg_},
{aten::absolute, aten::abs}, {aten::absolute_, aten::abs_},
{aten::clip, aten::clamp}, {aten::clip_, aten::clamp_},
{aten::linalg_det, aten::det}, {aten::outer, aten::ger},
{aten::arccos, aten::acos}, {aten::arccos_, aten::acos_},
{aten::arcsin, aten::asin}, {aten::arcsin_, aten::asin_},
{aten::arctan, aten::atan}, {aten::arctan_, aten::atan_},
{aten::arccosh, aten::acosh}, {aten::arccosh_, aten::acosh_},
{aten::arcsinh, aten::asinh}, {aten::arcsinh_, aten::asinh_},
{aten::arctanh, aten::atanh}, {aten::arctanh_, aten::atanh_},
{aten::fix, aten::trunc}, {aten::fix_, aten::trunc_},
{aten::negative, aten::neg}, {aten::negative_, aten::neg_},
};

void replaceNodeWithNewSymbol(Node* node, Symbol new_symbol) {
Expand Down
2 changes: 2 additions & 0 deletions torch/overrides.py
Original file line number Diff line number Diff line change
Expand Up @@ -229,10 +229,12 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
torch.asin: lambda input, out=None: -1,
torch.arcsin: lambda input, out=None: -1,
torch.asinh: lambda input, out=None: -1,
torch.arcsinh: lambda input, out=None: -1,
torch.atan: lambda input, out=None: -1,
torch.arctan: lambda input, out=None: -1,
torch.atan2: lambda input, other, out=None: -1,
torch.atanh: lambda input, out=None: -1,
torch.arctanh: lambda input, out=None: -1,
torch.atleast_1d: lambda input: -1,
torch.atleast_2d: lambda input: -1,
torch.atleast_3d: lambda input: -1,
Expand Down