10 changes: 5 additions & 5 deletions aten/src/ATen/functorch/BatchRulesRandomness.cpp
@@ -279,8 +279,8 @@ struct RandomBatchRuleHelper<F, Func, typelist<T1, T...>> {
};

template <typename F, F Func, typename... T>
-Tensor rand_int_wrapper(SymIntArrayRef shape, int64_t high, T... extra_args) {
-  return Func(high, shape, std::forward<T>(extra_args)...);
+Tensor rand_int_wrapper(SymIntArrayRef shape, c10::SymInt high, T... extra_args) {
[Review comment from a Contributor, on the c10::SymInt parameter] nit: probably ok w/o c10 namespace

+  return Func(high, std::move(shape), std::forward<T>(extra_args)...);
}

template <typename A, A a, typename C>
@@ -298,10 +298,10 @@ struct RandIntBatchRuleHelper;

template <typename F, F Func, typename T1, typename T2, typename... T>
struct RandIntBatchRuleHelper<F, Func, typelist<T1, T2, T...>> {
-  static Tensor apply(int64_t high, SymIntArrayRef shape, T... extra_args) {
+  static Tensor apply(c10::SymInt high, SymIntArrayRef shape, T... extra_args) {
return random_batching_rule<decltype(&rand_int_wrapper<F, Func, T...>),
&rand_int_wrapper<F, Func, T...>,
-                                int64_t, T...>(shape, high, std::forward<T>(extra_args)...);
+                                c10::SymInt, T...>(shape, std::move(high), std::forward<T>(extra_args)...);
}
};

@@ -318,7 +318,7 @@ struct RandTwoLeadingScalarsBatchRuleHelper<F, Func, typelist<T0, T1, T2, T...>>
static Tensor apply(T0 scalar0, T1 scalar1, SymIntArrayRef shape, T... extra_args) {
return random_batching_rule<decltype(&rand_int_low_wrapper<F, Func, T0, T1, T...>),
&rand_int_low_wrapper<F, Func, T0, T1, T...>,
-                                int64_t, int64_t, T...>(shape, scalar0, scalar1, std::forward<T>(extra_args)...);
+                                T0, T1, T...>(shape, scalar0, scalar1, std::forward<T>(extra_args)...);
}
};
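For context, the helper being changed here is an argument-reordering trampoline: the generic batching rule always passes the (batch-extended) shape first, while the underlying factory op takes high first, so the wrapper flips the order and forwards. Because c10::SymInt, unlike int64_t, may own a reference-counted symbolic node, the new code also moves values rather than copying them. A minimal self-contained sketch of the pattern (SymIntLike and fake_randint are invented stand-ins, not ATen APIs):

// Self-contained sketch; SymIntLike and fake_randint are invented stand-ins,
// not real ATen types. Compile with: g++ -std=c++17 sketch.cpp
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Stand-in for c10::SymInt: a value type that may own a heap-allocated
// symbolic expression, so moving it is cheaper than copying. int64_t has
// no such distinction, which is why the old code could copy freely.
struct SymIntLike {
  int64_t hint;
};

using Shape = std::vector<SymIntLike>;

// Stand-in factory op with the "natural" argument order: high first.
SymIntLike fake_randint(SymIntLike high, Shape shape) {
  std::cout << "randint(high=" << high.hint
            << ", ndim=" << shape.size() << ")\n";
  return high;
}

// The trampoline: generic batching code always passes shape first, so the
// wrapper reorders and forwards. Func is burned in as a non-type template
// parameter, just as in rand_int_wrapper above.
template <typename F, F Func, typename... T>
SymIntLike rand_int_wrapper(Shape shape, SymIntLike high, T... extra_args) {
  return Func(std::move(high), std::move(shape), std::forward<T>(extra_args)...);
}

int main() {
  Shape batched_shape{{2}, {3}, {5}};  // e.g. batch dim prepended
  rand_int_wrapper<decltype(&fake_randint), &fake_randint>(
      std::move(batched_shape), SymIntLike{10});
}

Baking Func in as a non-type template parameter means the compiler sees a concrete callee and can inline the hop, so the reordering wrapper adds no runtime indirection.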

28 changes: 14 additions & 14 deletions aten/src/ATen/native/native_functions.yaml
@@ -4361,55 +4361,55 @@
CompositeExplicitAutograd: rand_like
autogen: rand_like.out

-- func: randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint

-- func: randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint

-- func: randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint

-- func: randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint

-- func: randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+- func: randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out

-- func: randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+- func: randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out

-- func: randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+- func: randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out

-- func: randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+- func: randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out

-- func: randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+- func: randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: randint_like
autogen: randint_like.out

-- func: randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+- func: randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
# NB: Although this composite mutates on the inside, it is
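The schema edits above are what allow high and low to remain dynamic under tracing: an int argument must arrive as a concrete value and gets baked into a traced graph as a constant, whereas a SymInt can carry a symbolic expression that is resolved, or guarded on, only when a concrete value is demanded. A toy sketch of that tagged-value idea (ToySymInt, SymNode, and maybe_as_int are invented names that only loosely mirror c10::SymInt):

// Toy sketch only; not the real c10::SymInt, which uses pointer tagging.
#include <cstdint>
#include <iostream>
#include <memory>
#include <optional>
#include <string>

struct SymNode {
  std::string name;  // e.g. "s0" for a dynamic size variable
};

class ToySymInt {
 public:
  /*implicit*/ ToySymInt(int64_t v) : value_(v) {}
  explicit ToySymInt(std::shared_ptr<SymNode> n) : node_(std::move(n)) {}

  // Concrete value if one exists; nullopt when symbolic, forcing the
  // caller to either record a guard or keep computing symbolically.
  std::optional<int64_t> maybe_as_int() const {
    if (node_) return std::nullopt;
    return value_;
  }

 private:
  int64_t value_ = 0;
  std::shared_ptr<SymNode> node_;  // non-null => symbolic
};

int main() {
  ToySymInt concrete{10};
  ToySymInt symbolic{std::make_shared<SymNode>(SymNode{"s0"})};
  std::cout << concrete.maybe_as_int().value_or(-1) << "\n";  // prints 10
  std::cout << symbolic.maybe_as_int().has_value() << "\n";   // prints 0
}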
@@ -4457,22 +4457,22 @@
CompositeExplicitAutograd, CompositeImplicitAutogradNestedTensor: randn_like
autogen: randn_like.out

-- func: randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randperm

-- func: randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+- func: randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randperm

-- func: randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
+- func: randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randperm_out

-- func: randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+- func: randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CPU: randperm_out_cpu
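A concrete backend kernel such as randperm_out_cpu still needs a real integer to size and fill its output, so a symbolic n has to be materialized at the kernel boundary. A minimal sketch of that boundary check, with std::optional<int64_t> standing in for a possibly-symbolic SymInt (the real c10 guarding API differs):

// Sketch only: std::optional<int64_t> stands in for a SymInt that may or
// may not hold a concrete value.
#include <cstdint>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <vector>

std::vector<int64_t> toy_randperm_cpu(std::optional<int64_t> n) {
  if (!n) {
    // In a tracing setup this is where a guard would be recorded,
    // specializing the captured graph on the observed value of n.
    throw std::runtime_error("concrete kernel needs a concrete n");
  }
  std::vector<int64_t> out(static_cast<size_t>(*n));
  for (int64_t i = 0; i < *n; ++i) {
    out[static_cast<size_t>(i)] = i;  // identity; a real kernel shuffles
  }
  return out;
}

int main() {
  for (int64_t v : toy_randperm_cpu(4)) std::cout << v << ' ';
  std::cout << '\n';  // prints: 0 1 2 3
}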