test_bernoulli_self_cuda_bool: RuntimeError: "check_uniform_bounds" not implemented for 'Bool' #1796
Repro:
```
PYTORCH_TEST_WITH_INDUCTOR=1 python test/test_torch.py -k test_bernoulli_self_cuda_bool
```
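For reference, a minimal standalone sketch that should hit the same decomposition path outside the test suite (the tensor shape and the `torch._dynamo.optimize` entry point are assumptions, not taken from the test):

```python
import torch
import torch._dynamo

# Sketch of a standalone repro; the test uses a bool CUDA tensor, the shape is arbitrary.
@torch._dynamo.optimize("inductor")
def fn(t):
    return t.bernoulli_(0.5)

t = torch.empty(10, 10, dtype=torch.bool, device="cuda")
fn(t)  # expected to raise: RuntimeError: "check_uniform_bounds" not implemented for 'Bool'
```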
Error:
```
======================================================================
ERROR: test_bernoulli_self_cuda_bool (__main__.TestTorchDeviceTypeCUDA)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/fsx/users/binbao/pytorch/torch/testing/_internal/common_utils.py", line 2001, in wrapper
method(*args, **kwargs)
File "/fsx/users/binbao/pytorch/torch/testing/_internal/common_utils.py", line 2001, in wrapper
method(*args, **kwargs)
File "/fsx/users/binbao/pytorch/torch/testing/_internal/common_device_type.py", line 394, in instantiated_test
raise rte
File "/fsx/users/binbao/pytorch/torch/testing/_internal/common_device_type.py", line 381, in instantiated_test
result = test(self, **param_kwargs)
File "/fsx/users/binbao/pytorch/test/test_torch.py", line 1833, in test_bernoulli_self
@dtypes(*floating_types())
File "/fsx/users/binbao/pytorch/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/fsx/users/binbao/pytorch/functorch/_src/aot_autograd.py", line 870, in forward
return compiled_f(
File "/fsx/users/binbao/pytorch/functorch/_src/aot_autograd.py", line 856, in new_func
compiled_fn = create_aot_dispatcher_function(
File "/fsx/users/binbao/torchdynamo-tip/torchdynamo/utils.py", line 86, in time_wrapper
r = func(*args, **kwargs)
File "/fsx/users/binbao/pytorch/functorch/_src/aot_autograd.py", line 579, in create_aot_dispatcher_function
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
File "/fsx/users/binbao/pytorch/functorch/_src/aot_autograd.py", line 285, in aot_dispatch_base
fw_module = make_fx(flat_fn, aot_config.decompositions)(*flat_args)
File "/fsx/users/binbao/pytorch/torch/fx/experimental/proxy_tensor.py", line 657, in wrapped
t = dispatch_trace(wrap_key(func, args, fx_tracer), tracer=fx_tracer, concrete_args=tuple(phs))
File "/fsx/users/binbao/pytorch/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/fsx/users/binbao/pytorch/torch/fx/experimental/proxy_tensor.py", line 413, in dispatch_trace
graph = tracer.trace(root, concrete_args)
File "/fsx/users/binbao/pytorch/torch/_dynamo/eval_frame.py", line 157, in _fn
return fn(*args, **kwargs)
File "/fsx/users/binbao/pytorch/torch/fx/_symbolic_trace.py", line 739, in trace
(self.create_arg(fn(*args)),),
File "/fsx/users/binbao/pytorch/torch/fx/experimental/proxy_tensor.py", line 427, in wrapped
out = f(*tensors)
File "<string>", line 1, in <lambda>
File "/fsx/users/binbao/pytorch/functorch/_src/aot_autograd.py", line 818, in functional_call
out = Interpreter(mod).run(*args[params_len:], **kwargs)
File "/fsx/users/binbao/pytorch/torch/fx/interpreter.py", line 130, in run
self.env[node] = self.run_node(node)
File "/fsx/users/binbao/pytorch/torch/fx/interpreter.py", line 171, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/fsx/users/binbao/pytorch/torch/fx/interpreter.py", line 265, in call_method
return getattr(self_obj, target)(*args_tail, **kwargs)
File "/fsx/users/binbao/pytorch/torch/fx/experimental/proxy_tensor.py", line 453, in __torch_dispatch__
return self.inner_torch_dispatch(func, types, args, kwargs)
File "/fsx/users/binbao/pytorch/torch/fx/experimental/proxy_tensor.py", line 478, in inner_torch_dispatch
out = proxy_call(self, func, args, kwargs)
File "/fsx/users/binbao/pytorch/torch/fx/experimental/proxy_tensor.py", line 226, in proxy_call
r = CURRENT_DECOMPOSITION_TABLE[func](*args, **kwargs)
File "/fsx/users/binbao/pytorch/torch/_inductor/decomposition.py", line 326, in bernoulli_
return self.copy_(torch.rand_like(self) < p)
File "/fsx/users/binbao/pytorch/torch/fx/experimental/proxy_tensor.py", line 453, in __torch_dispatch__
return self.inner_torch_dispatch(func, types, args, kwargs)
File "/fsx/users/binbao/pytorch/torch/fx/experimental/proxy_tensor.py", line 478, in inner_torch_dispatch
out = proxy_call(self, func, args, kwargs)
File "/fsx/users/binbao/pytorch/torch/fx/experimental/proxy_tensor.py", line 317, in proxy_call
out = func(*args, **kwargs)
File "/fsx/users/binbao/pytorch/torch/_ops.py", line 257, in __call__
return self._op(*args, **kwargs or {})
File "/fsx/users/binbao/pytorch/torch/_subclasses/fake_tensor.py", line 814, in __torch_dispatch__
op_impl_out = op_impl(self, func, *args, **kwargs)
File "/fsx/users/binbao/pytorch/torch/_subclasses/fake_tensor.py", line 307, in constructors
r = func(*args, **new_kwargs)
File "/fsx/users/binbao/pytorch/torch/_ops.py", line 257, in __call__
return self._op(*args, **kwargs or {})
RuntimeError: "check_uniform_bounds" not implemented for 'Bool'
While executing %bernoulli_ : [#users=0] = call_method[target=bernoulli_](args = (%empty, 0.5), kwargs = {})
Original traceback:
Module stack: {}
File "/fsx/users/binbao/pytorch/test/test_torch.py", line 1844, in test_bernoulli_self
t.bernoulli_(0.5)
```
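The failure comes from the inductor decomposition shown in the traceback (`torch/_inductor/decomposition.py`, `bernoulli_`), which calls `torch.rand_like(self)` on the bool tensor itself; uniform sampling is not implemented for `Bool`, hence the `check_uniform_bounds` error. One possible fix, sketched below as an assumption rather than a tested patch, is to draw the uniforms in a floating dtype and let `copy_` cast the comparison result back into `self`:

```python
import torch

# Hypothetical adjustment to the decomposition (not a merged fix): generate the
# uniform samples in float32 regardless of self.dtype, compare against p to get
# a bool mask, and rely on copy_ to cast the mask back into self's dtype.
def bernoulli_(self, p=0.5):
    return self.copy_(torch.rand_like(self, dtype=torch.float32) < p)
```

This keeps the existing `copy_`-based structure of the decomposition and only changes the dtype used for the intermediate uniform tensor.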