diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml index 88201a6170c2..b84c754b7efd 100644 --- a/aten/src/ATen/native/native_functions.yaml +++ b/aten/src/ATen/native/native_functions.yaml @@ -220,10 +220,14 @@ - func: abs(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: abs - func: abs_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: abs_ - func: abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -268,6 +272,8 @@ - func: angle(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: angle - func: angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -276,10 +282,14 @@ - func: view_as_real(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: full variants: function + dispatch: + DefaultBackend: view_as_real - func: view_as_complex(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: full variants: function + dispatch: + DefaultBackend: view_as_complex - func: sgn(Tensor self) -> Tensor use_c10_dispatcher: full @@ -315,14 +325,20 @@ - func: _conj(Tensor self) -> Tensor use_c10_dispatcher: full variants: function + dispatch: + DefaultBackend: _conj - func: acos(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: acos - func: acos_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: acos_ - func: acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -393,10 +409,14 @@ - func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: add - func: add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: add_ - func: addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor use_c10_dispatcher: full @@ -423,16 +443,24 @@ - func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: addr - func: addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: addr_ - func: addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: addr_out - func: affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor use_c10_dispatcher: full variants: function + dispatch: + DefaultBackend: affine_grid_generator - func: affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor use_c10_dispatcher: full @@ -511,10 +539,14 @@ - func: acosh(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: acosh - func: acosh_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: acosh_ - func: acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -534,10 +566,14 @@ - func: asinh(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: asinh - func: asinh_(Tensor(a!) self) -> Tensor(a!) 
use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: asinh_ - func: asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -557,10 +593,14 @@ - func: atanh(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: atanh - func: atanh_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: atanh_ - func: atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -588,10 +628,14 @@ - func: as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!) variants: function, method device_guard: False + dispatch: + DefaultBackend: as_strided_ - func: asin(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: asin - func: asin_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full @@ -619,10 +663,14 @@ - func: atan(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: atan - func: atan_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: atan_ - func: atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -709,6 +757,8 @@ # Sample bernoulli with values in `self` as probability. - func: bernoulli(Tensor self, *, Generator? generator=None) -> Tensor variants: function, method + dispatch: + DefaultBackend: bernoulli - func: bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) variants: function @@ -767,6 +817,8 @@ - func: binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor use_c10_dispatcher: hacky_wrapper_for_legacy_signatures variants: function + dispatch: + DefaultBackend: binary_cross_entropy_with_logits - func: binary_cross_entropy_with_logits_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor use_c10_dispatcher: hacky_wrapper_for_legacy_signatures @@ -879,8 +931,12 @@ - func: cat(Tensor[] tensors, int dim=0) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: cat - func: cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: cat_out - func: cat.names(Tensor[] tensors, Dimname dim) -> Tensor @@ -893,10 +949,14 @@ - func: ceil(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: ceil - func: ceil_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: ceil_ - func: ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -934,6 +994,8 @@ - func: clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: clamp_ - func: clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -942,10 +1004,14 @@ - func: clamp_max(Tensor self, Scalar max) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: clamp_max - func: clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: clamp_max_ - func: clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) 
dispatch: @@ -954,10 +1020,14 @@ - func: clamp_min(Tensor self, Scalar min) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: clamp_min - func: clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: clamp_min_ - func: clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -980,6 +1050,8 @@ - func: complex(Tensor real, Tensor imag) -> Tensor use_c10_dispatcher: full variants: function + dispatch: + DefaultBackend: complex - func: complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -988,6 +1060,8 @@ - func: polar(Tensor abs, Tensor angle) -> Tensor use_c10_dispatcher: full variants: function + dispatch: + DefaultBackend: polar - func: polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -996,6 +1070,8 @@ - func: constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor use_c10_dispatcher: full variants: function + dispatch: + DefaultBackend: constant_pad_nd - func: contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a) use_c10_dispatcher: full @@ -1006,9 +1082,13 @@ - func: convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + dispatch: + DefaultBackend: convolution_overrideable - func: convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) use_c10_dispatcher: full + dispatch: + DefaultBackend: convolution_backward_overrideable - func: _convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor use_c10_dispatcher: hacky_wrapper_for_legacy_signatures @@ -1033,6 +1113,8 @@ - func: conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: conv_tbc - func: conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: full @@ -1051,6 +1133,8 @@ use_c10_dispatcher: full variants: method device_guard: False + dispatch: + DefaultBackend: copy_ - func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor use_c10_dispatcher: full @@ -1059,10 +1143,14 @@ - func: cos(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: cos - func: cos_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: cos_ - func: cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -1071,10 +1159,14 @@ - func: cosh(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: cosh - func: cosh_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: cosh_ - func: cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -1092,6 +1184,8 @@ - func: count_nonzero(Tensor self, int? 
dim=None) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: count_nonzero - func: cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid use_c10_dispatcher: full @@ -1191,8 +1285,12 @@ - func: cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: cummax - func: cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + dispatch: + DefaultBackend: cummax_out - func: cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) variants: function, method @@ -1208,8 +1306,12 @@ - func: cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: cummin - func: cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + dispatch: + DefaultBackend: cummin_out - func: cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) variants: function, method @@ -1230,8 +1332,12 @@ - func: cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: cumprod - func: cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: cumprod_out - func: cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor variants: function, method @@ -1246,8 +1352,12 @@ - func: cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: cumsum - func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: cumsum_out - func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor variants: function, method @@ -1284,6 +1394,8 @@ - func: diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: diagonal - func: diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a) variants: function, method @@ -1320,10 +1432,14 @@ - func: div.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: div - func: div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: div_ # divide, alias for div - func: divide.Tensor(Tensor self, Tensor other) -> Tensor @@ -1371,6 +1487,8 @@ CUDA: dot_cuda - func: dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: dot_out - func: vdot(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full @@ -1380,12 +1498,16 @@ CUDA: vdot_cuda - func: vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ dispatch: + DefaultBackend: vdot_out - func: einsum(str equation, Tensor[] tensors) -> Tensor use_c10_dispatcher: full - func: embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: embedding - func: embedding_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor use_c10_dispatcher: full @@ -1524,10 +1646,14 @@ - func: erf(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: erf - func: erf_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: erf_ - func: erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -1536,10 +1662,14 @@ - func: erfc(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: erfc - func: erfc_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: erfc_ - func: erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -1548,10 +1678,14 @@ - func: exp(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: exp - func: exp_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: exp_ - func: exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -1560,10 +1694,14 @@ - func: exp2(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: exp2 - func: exp2_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: exp2_ - func: exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -1572,10 +1710,14 @@ - func: expm1(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: expm1 - func: expm1_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: expm1_ - func: expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -1585,6 +1727,8 @@ use_c10_dispatcher: full variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too. device_guard: False + dispatch: + DefaultBackend: expand - func: expand_as(Tensor(a) self, Tensor other) -> Tensor(a) use_c10_dispatcher: full @@ -1629,18 +1773,26 @@ - func: fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: fill_ - func: fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: fill_ - func: floor(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: floor - func: floor_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: floor_ - func: floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -1676,10 +1828,14 @@ - func: frac(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: frac - func: frac_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: frac_ - func: frac.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!) dispatch: @@ -1758,6 +1914,8 @@ # See NOTE [ grid_sample CPU fallback ] - func: _grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: _grid_sampler_2d_cpu_fallback - func: _grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) use_c10_dispatcher: full @@ -1865,6 +2023,8 @@ - func: index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: index_copy_ - func: index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor use_c10_dispatcher: full @@ -1878,6 +2038,8 @@ - func: index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) variants: function, method + dispatch: + DefaultBackend: index_put_ # NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp: # - Tensor & Tensor::index_put_(ArrayRef indices, Tensor const & rhs) # - Tensor & Tensor::index_put_(ArrayRef indices, Scalar v) @@ -1899,8 +2061,12 @@ - func: inverse(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: inverse - func: inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: inverse_out - func: _inverse_helper(Tensor self) -> Tensor use_c10_dispatcher: full @@ -1957,6 +2123,8 @@ - func: kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: kl_div - func: kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor use_c10_dispatcher: full @@ -1967,6 +2135,8 @@ - func: kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: kthvalue - func: kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) dispatch: @@ -1996,12 +2166,18 @@ - func: nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: nan_to_num - func: nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: nan_to_num_ - func: nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: nan_to_num_out - func: linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor use_c10_dispatcher: hacky_wrapper_for_legacy_signatures @@ -2048,10 +2224,14 @@ - func: log(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: log - func: log_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: log_ - func: log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
dispatch: @@ -2060,10 +2240,14 @@ - func: log10(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: log10 - func: log10_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: log10_ - func: log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -2072,6 +2256,8 @@ - func: log1p(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: log1p - func: log1p_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full @@ -2088,10 +2274,14 @@ - func: log2(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: log2 - func: log2_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: log2_ - func: log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -2104,6 +2294,8 @@ - func: logaddexp(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: logaddexp - func: logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -2112,10 +2304,14 @@ - func: logaddexp2(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: logaddexp2 - func: logdet(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: logdet - func: logspace(Scalar start, Scalar end, int? steps=None, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor use_c10_dispatcher: hacky_wrapper_for_legacy_signatures @@ -2159,8 +2355,12 @@ - func: logcumsumexp(Tensor self, int dim) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: logcumsumexp - func: logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: logcumsumexp_out - func: logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor variants: function, method @@ -2170,8 +2370,12 @@ - func: logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: logsumexp - func: logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: logsumexp_out - func: logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor variants: function, method @@ -2229,6 +2433,8 @@ - func: max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: max - func: max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) dispatch: @@ -2247,6 +2453,8 @@ - func: amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: amax - func: amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -2321,6 +2529,8 @@ - func: median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: median - func: median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) 
values, Tensor(b!) indices) dispatch: @@ -2342,6 +2552,8 @@ - func: nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: nanmedian - func: nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) dispatch: @@ -2356,6 +2568,8 @@ - func: min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: min - func: min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) dispatch: @@ -2369,6 +2583,8 @@ - func: amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: amin - func: amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -2376,6 +2592,8 @@ - func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor use_c10_dispatcher: hacky_wrapper_for_legacy_signatures + dispatch: + DefaultBackend: mkldnn_convolution - func: mkldnn_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> Tensor use_c10_dispatcher: full @@ -2385,6 +2603,8 @@ - func: mkldnn_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: full + dispatch: + DefaultBackend: mkldnn_convolution_backward - func: miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: hacky_wrapper_for_legacy_signatures @@ -2497,6 +2717,8 @@ CPU, CUDA: mode - func: mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + dispatch: + DefaultBackend: mode_out - func: mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) variants: function, method @@ -2530,10 +2752,14 @@ - func: mul.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: mul - func: mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: mul_ # multiply, alias for mul - func: multiply.Tensor(Tensor self, Tensor other) -> Tensor @@ -2562,14 +2788,20 @@ SparseCPU, SparseCUDA: mv_sparse - func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: mv_out - func: mvlgamma(Tensor self, int p) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: mvlgamma - func: mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: mvlgamma_ - func: narrow_copy(Tensor self, int dim, int start, int length) -> Tensor use_c10_dispatcher: full @@ -2655,6 +2887,8 @@ - func: _nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? 
bias, int[2] padding, int[2] stride=1) -> Tensor use_c10_dispatcher: hacky_wrapper_for_legacy_signatures variants: function + dispatch: + DefaultBackend: _nnpack_spatial_convolution - func: _nnpack_spatial_convolution_backward(Tensor input, Tensor grad_output, Tensor weight, int[2] padding, bool[3] output_mask) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: full @@ -2687,6 +2921,8 @@ - func: _euclidean_dist(Tensor x1, Tensor x2) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: _euclidean_dist - func: _cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor use_c10_dispatcher: full @@ -2718,6 +2954,8 @@ - func: permute(Tensor(a) self, int[] dims) -> Tensor(a) use_c10_dispatcher: full variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too. + dispatch: + DefaultBackend: permute - func: movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) use_c10_dispatcher: full @@ -2766,22 +3004,34 @@ - func: rad2deg(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: rad2deg - func: rad2deg_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: rad2deg_ - func: rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: rad2deg_out - func: deg2rad(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: deg2rad - func: deg2rad_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: deg2rad_ - func: deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: deg2rad_out - func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor use_c10_dispatcher: hacky_wrapper_for_legacy_signatures @@ -2876,10 +3126,14 @@ - func: reciprocal(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: reciprocal - func: reciprocal_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: reciprocal_ - func: reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -2888,6 +3142,8 @@ - func: neg(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: neg - func: neg_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full @@ -2915,6 +3171,8 @@ - func: repeat(Tensor self, int[] repeats) -> Tensor use_c10_dispatcher: full variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too. + dispatch: + DefaultBackend: repeat - func: repeat_interleave.Tensor(Tensor repeats) -> Tensor use_c10_dispatcher: full @@ -2950,10 +3208,14 @@ - func: round(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: round - func: round_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: round_ - func: round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -3029,10 +3291,14 @@ - func: rsqrt(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: rsqrt - func: rsqrt_(Tensor(a!) self) -> Tensor(a!) 
use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: rsqrt_ - func: rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -3046,6 +3312,8 @@ use_c10_dispatcher: full variants: function, method device_guard: False + dispatch: + DefaultBackend: select - func: select_backward(Tensor grad, int[] input_sizes, int dim, int index) -> Tensor use_c10_dispatcher: full @@ -3060,17 +3328,25 @@ - func: celu(Tensor self, Scalar alpha=1.0) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: celu - func: celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!) use_c10_dispatcher: full + dispatch: + DefaultBackend: celu_ - func: silu(Tensor self) -> Tensor use_c10_dispatcher: full python_module: nn + dispatch: + DefaultBackend: silu - func: silu_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full python_module: nn + dispatch: + DefaultBackend: silu_ - func: silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) python_module: nn @@ -3119,10 +3395,14 @@ - func: sin(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: sin - func: sin_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: sin_ - func: sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -3131,10 +3411,14 @@ - func: sinh(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: sinh - func: sinh_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: sinh_ - func: sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -3154,6 +3438,8 @@ - func: detach(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: detach # Like `detach()`, but modifies this `Variable` in-place. This method may # only be called on non-view `Variable`s. You can use `is_view()` to check @@ -3161,6 +3447,8 @@ - func: detach_(Tensor(a!) self) -> Tensor(a!) 
use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: detach_ - func: size.int(Tensor self, int dim) -> int use_c10_dispatcher: full @@ -3175,6 +3463,8 @@ use_c10_dispatcher: full variants: function, method device_guard: False + dispatch: + DefaultBackend: slice - func: slice_backward(Tensor grad, int[] input_sizes, int dim, int start, int end, int step) -> Tensor use_c10_dispatcher: full @@ -3184,6 +3474,8 @@ - func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: slogdet - func: smm(Tensor self, Tensor mat2) -> Tensor use_c10_dispatcher: full @@ -3214,31 +3506,43 @@ use_c10_dispatcher: full variants: function, method device_guard: False + dispatch: + DefaultBackend: unsafe_split - func: split.Tensor(Tensor(a) self, int split_size, int dim=0) -> Tensor(a)[] use_c10_dispatcher: full variants: function, method device_guard: False + dispatch: + DefaultBackend: split - func: unsafe_split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[] use_c10_dispatcher: full variants: function, method device_guard: False + dispatch: + DefaultBackend: unsafe_split_with_sizes - func: split_with_sizes(Tensor(a) self, int[] split_sizes, int dim=0) -> Tensor(a)[] use_c10_dispatcher: full variants: function, method device_guard: False + dispatch: + DefaultBackend: split_with_sizes - func: squeeze(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: full variants: function, method device_guard: False + dispatch: + DefaultBackend: squeeze - func: squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) use_c10_dispatcher: full variants: function, method device_guard: False + dispatch: + DefaultBackend: squeeze - func: squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a) variants: function, method @@ -3248,11 +3552,15 @@ use_c10_dispatcher: full variants: method device_guard: False + dispatch: + DefaultBackend: squeeze_ - func: squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!) use_c10_dispatcher: full variants: method device_guard: False + dispatch: + DefaultBackend: squeeze_ - func: squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!) variants: method @@ -3271,8 +3579,12 @@ - func: stack(Tensor[] tensors, int dim=0) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: stack - func: stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: stack_out - func: hstack(Tensor[] tensors) -> Tensor use_c10_dispatcher: full @@ -3355,10 +3667,14 @@ - func: sqrt(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: sqrt - func: sqrt_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: sqrt_ - func: sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -3433,19 +3749,27 @@ use_c10_dispatcher: full device_guard: False variants: function, method + dispatch: + DefaultBackend: t - func: t_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full device_guard: False variants: method + dispatch: + DefaultBackend: t_ - func: tan(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: tan - func: tan_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: tan_ - func: tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -3461,6 +3785,8 @@ - func: tanh_(Tensor(a!) self) -> Tensor(a!) 
use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: tanh_ - func: tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -3502,6 +3828,8 @@ use_c10_dispatcher: full variants: function, method device_guard: False + dispatch: + DefaultBackend: transpose - func: transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a) variants: function, method @@ -3517,6 +3845,8 @@ use_c10_dispatcher: full variants: method device_guard: False + dispatch: + DefaultBackend: transpose_ - func: _mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) use_c10_dispatcher: full @@ -3556,6 +3886,8 @@ - func: rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: rot90 - func: trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor use_c10_dispatcher: full @@ -3565,6 +3897,8 @@ - func: _trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: _trilinear - func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor use_c10_dispatcher: full @@ -3572,10 +3906,14 @@ - func: trunc(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: trunc - func: trunc_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: trunc_ - func: trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -3641,16 +3979,22 @@ - func: _unsafe_view(Tensor self, int[] size) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: _unsafe_view - func: unsqueeze(Tensor(a) self, int dim) -> Tensor(a) use_c10_dispatcher: full variants: function, method device_guard: False + dispatch: + DefaultBackend: unsqueeze - func: unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!) use_c10_dispatcher: full variants: method device_guard: False + dispatch: + DefaultBackend: unsqueeze_ - func: vander(Tensor x, int? N=None, bool increasing=False) -> Tensor use_c10_dispatcher: full @@ -3819,6 +4163,8 @@ - func: _sparse_sum.dim(Tensor self, int[1] dim) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: _sparse_sum - func: _sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor use_c10_dispatcher: full @@ -3870,18 +4216,26 @@ - func: norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: norm - func: norm.Scalar(Tensor self, Scalar p=2) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: norm - func: norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: norm - func: norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: norm - func: norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -3938,6 +4292,8 @@ - func: resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!) 
use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: resize_as_ - func: zero_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full @@ -3970,10 +4326,14 @@ - func: sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: sub - func: sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: sub_ # subtract, alias for sub - func: subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) @@ -4016,11 +4376,15 @@ - func: rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor use_c10_dispatcher: full variants: function + dispatch: + DefaultBackend: rsub # Functionally the same as addmm, but we give it a different derivative formula # that doesn't propagate gradients to non-present entries on sparse. - func: _sparse_addmm(Tensor self, Tensor sparse, Tensor dense, *, Scalar beta=1, Scalar alpha=1) -> Tensor use_c10_dispatcher: full + dispatch: + DefaultBackend: _sparse_addmm - func: addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -4323,6 +4687,8 @@ - func: unbind.int(Tensor(a) self, int dim=0) -> Tensor(a)[] use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: unbind - func: unbind.Dimname(Tensor(a) self, Dimname dim) -> Tensor(a)[] variants: function, method @@ -4664,6 +5030,8 @@ # PackedSequence utilities - func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor) use_c10_dispatcher: full + dispatch: + DefaultBackend: _pack_padded_sequence - func: _pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor use_c10_dispatcher: full @@ -4862,10 +5230,14 @@ - func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: eq_ - func: eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: eq_ - func: bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) variants: function @@ -5159,6 +5531,8 @@ - func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: addcdiv_ - func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!) variants: method @@ -5210,6 +5584,8 @@ - func: diag(Tensor self, int diagonal=0) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: diag - func: diag_backward(Tensor grad, int[] input_sizes, int diagonal) -> Tensor use_c10_dispatcher: full @@ -5234,6 +5610,8 @@ - func: triu(Tensor self, int diagonal=0) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: triu - func: tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -5243,6 +5621,8 @@ - func: tril(Tensor self, int diagonal=0) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: tril - func: tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor use_c10_dispatcher: hacky_wrapper_for_legacy_signatures @@ -5295,10 +5675,14 @@ - func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: ne_ - func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: ne_ # not_equal, alias for torch.ne - func: not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) @@ -5372,10 +5756,14 @@ - func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: ge_ - func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: ge_ # greater_equal, alias for torch.ge - func: greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) @@ -5425,10 +5813,14 @@ - func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: le_ - func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: le_ # less_equal, alias for torch.le - func: less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) @@ -5478,10 +5870,14 @@ - func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: gt_ - func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: gt_ # greater, alias for torch.gt - func: greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) @@ -5531,10 +5927,14 @@ - func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: lt_ - func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: lt_ # less, alias for torch.lt - func: less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) @@ -5662,10 +6062,14 @@ - func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: addcmul - func: addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: addcmul_ - func: addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -5674,6 +6078,8 @@ - func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: addcdiv - func: lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR) dispatch: @@ -5688,10 +6094,14 @@ CUDA: legacy::cuda::_th_gels - func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) 
cloned_coefficient) + dispatch: + DefaultBackend: triangular_solve_out - func: triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: triangular_solve - func: _triangular_solve_helper(Tensor self, Tensor A, bool upper, bool transpose, bool unitriangular) -> (Tensor, Tensor) use_c10_dispatcher: full @@ -5701,10 +6111,14 @@ CUDA: _triangular_solve_helper_cuda - func: symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + dispatch: + DefaultBackend: symeig_out - func: symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors) use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: symeig - func: _symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor) use_c10_dispatcher: full @@ -5726,10 +6140,14 @@ CUDA: legacy::cuda::_th_eig - func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) + dispatch: + DefaultBackend: svd_out - func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: svd - func: _svd_helper(Tensor self, bool some, bool compute_uv) -> (Tensor, Tensor, Tensor) use_c10_dispatcher: full @@ -5739,10 +6157,14 @@ CUDA: _svd_helper_cuda - func: cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: cholesky_out - func: cholesky(Tensor self, bool upper=False) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: cholesky - func: _cholesky_helper(Tensor self, bool upper) -> Tensor use_c10_dispatcher: full @@ -5752,10 +6174,14 @@ CUDA: _cholesky_helper_cuda - func: cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + dispatch: + DefaultBackend: cholesky_solve_out - func: cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: cholesky_solve - func: _cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor use_c10_dispatcher: full @@ -5767,8 +6193,12 @@ - func: solve(Tensor self, Tensor A) -> (Tensor solution, Tensor LU) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: solve - func: solve.solution(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!) lu) -> (Tensor(a!) solution, Tensor(b!) LU) + dispatch: + DefaultBackend: solve_out - func: _solve_helper(Tensor self, Tensor A) -> (Tensor, Tensor) use_c10_dispatcher: full @@ -5790,10 +6220,14 @@ CUDA: legacy::cuda::_th_potri - func: qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) + dispatch: + DefaultBackend: qr_out - func: qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: qr - func: _qr_helper(Tensor self, bool some) -> (Tensor, Tensor) use_c10_dispatcher: full @@ -5842,10 +6276,14 @@ CUDA: _lu_with_info_cuda - func: lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) 
+ dispatch: + DefaultBackend: lu_solve_out - func: lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: lu_solve - func: _lu_solve_helper(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor use_c10_dispatcher: full @@ -5905,6 +6343,8 @@ - func: polygamma(int n, Tensor self) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: polygamma - func: erfinv(Tensor self) -> Tensor use_c10_dispatcher: full @@ -5927,10 +6367,14 @@ - func: i0(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: i0 - func: i0_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: i0_ - func: i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -5939,10 +6383,14 @@ - func: sign(Tensor self) -> Tensor use_c10_dispatcher: full variants: function, method + dispatch: + DefaultBackend: sign - func: sign_(Tensor(a!) self) -> Tensor(a!) use_c10_dispatcher: full variants: method + dispatch: + DefaultBackend: sign_ - func: sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -5960,6 +6408,8 @@ - func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: dist - func: atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -6043,6 +6493,8 @@ - func: hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method + dispatch: + DefaultBackend: hypot_ - func: nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -6056,6 +6508,8 @@ - func: nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!) variants: method + dispatch: + DefaultBackend: nextafter_ - func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) dispatch: @@ -6302,6 +6756,8 @@ - func: alias(Tensor(a) self) -> Tensor(a) use_c10_dispatcher: full variants: method, function + dispatch: + DefaultBackend: alias - func: _index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) use_c10_dispatcher: full @@ -6694,10 +7150,14 @@ - func: l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn + dispatch: + DefaultBackend: l1_loss_out - func: l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor use_c10_dispatcher: full python_module: nn + dispatch: + DefaultBackend: l1_loss - func: l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn @@ -6707,6 +7167,8 @@ - func: l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor use_c10_dispatcher: full python_module: nn + dispatch: + DefaultBackend: l1_loss_backward - func: multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn @@ -6854,20 +7316,30 @@ - func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor use_c10_dispatcher: full python_module: nn + dispatch: + DefaultBackend: smooth_l1_loss_backward - func: soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) 
   python_module: nn
+  dispatch:
+    DefaultBackend: soft_margin_loss_out
 
 - func: soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
   use_c10_dispatcher: full
   python_module: nn
+  dispatch:
+    DefaultBackend: soft_margin_loss
 
 - func: soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
   python_module: nn
+  dispatch:
+    DefaultBackend: soft_margin_loss_backward_out
 
 - func: soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
   use_c10_dispatcher: full
   python_module: nn
+  dispatch:
+    DefaultBackend: soft_margin_loss_backward
 
 - func: elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
   python_module: nn
@@ -6894,6 +7366,8 @@
 - func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
   use_c10_dispatcher: full
   python_module: nn
+  dispatch:
+    DefaultBackend: elu_
 
 - func: glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
   python_module: nn
@@ -7073,6 +7547,8 @@
 - func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
   use_c10_dispatcher: full
   python_module: nn
+  dispatch:
+    DefaultBackend: rrelu_with_noise_backward
 
 - func: rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
   python_module: nn
@@ -8293,6 +8769,8 @@
 - func: det(Tensor self) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
+  dispatch:
+    DefaultBackend: det
 
 # torch.outer, alias for torch.ger
 - func: outer(Tensor self, Tensor vec2) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
@@ -8304,8 +8782,12 @@
 - func: ger(Tensor self, Tensor vec2) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
+  dispatch:
+    DefaultBackend: ger
 
 - func: ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
+  dispatch:
+    DefaultBackend: ger_out
 
 - func: linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
   python_module: linalg
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index a1f162f91471..9cca7b1319f8 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -748,6 +748,12 @@ def gen_variable_type_shard(out, aten_declarations, template_path, suffix, header):
 
         # See Note [Manual catchAll kernels]
         assert (declaration['name'] in MANUAL_CATCHALL) == declaration['manual_kernel_registration']
+        # If you want to register a kernel to Autograd, you must make the op abstract.
+        # In other words, the op must have a dispatch section in native_functions.yaml.
+        if declaration['name'] in MANUAL_AUTOGRAD_AND_TRACER or declaration['derivative']:
+            msg = (f'Did you add a formula for {declaration["name"]} (or its functional variant) in derivatives.yaml? '
+                   f'If so, please add a dispatch section for it with DefaultBackend in native_functions.yaml.')
+            assert declaration['abstract'], msg
 
         # Emit TraceType code
         if declaration['name'] not in MANUAL_TRACER:
diff --git a/tools/codegen/gen.py b/tools/codegen/gen.py
index 69e1aae3a014..785cab06a589 100644
--- a/tools/codegen/gen.py
+++ b/tools/codegen/gen.py
@@ -155,6 +155,9 @@ def cpp_string(s: str) -> str:
 # code we want.
 Target = Enum('Target', ('DEFINITION', 'DECLARATION', 'REGISTRATION'))
 
+# Dispatch keywords in native_functions.yaml that support all backends.
+KEYWORD_ALL_BACKENDS = ('DefaultBackend', 'Math')
+
 # Generates {dispatch}Type.cpp and {dispatch}Type.h (e.g., CPUType.cpp
 # and CPUType.h). This function is also reused to implement per-operator
 # registration. It also generates TypeDefault.cpp and TypeDefault.h when
@@ -273,7 +276,7 @@ def func(f: NativeFunction) -> Optional[str]:
             assert returns_type == dispatcher.returns_type(f.func.returns)
             dispatcher_args = dispatcher.arguments(f.func)
             dispatcher_args_types_str = ', '.join(map(lambda a: a.type, dispatcher_args))
-            if dispatch is None or dispatch == 'Math' or dispatch == 'DefaultBackend':
+            if dispatch is None or dispatch in KEYWORD_ALL_BACKENDS:
                 type_name = f'TypeDefault::{name}'
             else:
                 type_name = f'{dispatch}Type::{name}'
@@ -901,7 +904,7 @@ def compute_registration_declarations(f: NativeFunction) -> str:
     comment_data : Dict[str, str] = {
         'schema': f'aten::{f.func}',
         'dispatch': str(f.dispatch is not None),
-        'math': str(f.dispatch is not None and 'Math' in f.dispatch)
+        'default': str(f.dispatch is not None and any(k in f.dispatch for k in KEYWORD_ALL_BACKENDS))
     }
     return f"""{returns_type} {name}({args_str}); // {json.dumps(comment_data)}
 """
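The gen.py hunks above centralize the "all-backend" dispatch keywords behind KEYWORD_ALL_BACKENDS: a DefaultBackend or Math entry routes an op's kernel to TypeDefault, and the registration-declaration comment now reports a combined 'default' flag instead of the old 'math'-only flag. Below is a minimal standalone sketch of that behavior, not the actual codegen; the helper names type_name_for and default_flag are illustrative only.

from typing import Dict, Optional

KEYWORD_ALL_BACKENDS = ('DefaultBackend', 'Math')

def type_name_for(dispatch: Optional[str], name: str) -> str:
    # Mirrors the changed condition in func(): ops whose dispatch entry is an
    # all-backend keyword (or that have no dispatch section) go to TypeDefault.
    if dispatch is None or dispatch in KEYWORD_ALL_BACKENDS:
        return f'TypeDefault::{name}'
    return f'{dispatch}Type::{name}'

def default_flag(dispatch: Optional[Dict[str, str]]) -> str:
    # Mirrors the comment_data change: 'default' is "True" when any
    # all-backend keyword appears in the op's dispatch section.
    return str(dispatch is not None and any(k in dispatch for k in KEYWORD_ALL_BACKENDS))

if __name__ == '__main__':
    print(type_name_for('DefaultBackend', 'abs'))               # TypeDefault::abs
    print(type_name_for('CPU', 'abs_out'))                      # CPUType::abs_out
    print(default_flag({'DefaultBackend': 'abs'}))              # True
    print(default_flag({'CPU': 'abs_out', 'CUDA': 'abs_out'}))  # False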