Revert "fix Invalid call to aoti_torch_tensor_copy_ #123039 (#124037)"
This reverts commit 6e24cc0.

Reverted #124037 on behalf of https://github.com/jeanschmidt because it seems to have introduced a regression in pull / linux-focal-cuda12.1-py3.10-gcc9 / test (default, 3, 5, linux.4xlarge.nvidia.gpu) (see the comment on #124037).
pytorchmergebot committed Apr 22, 2024
1 parent 3af1244 commit 34bce27
Showing 2 changed files with 9 additions and 12 deletions.
test/inductor/test_cuda_cpp_wrapper.py (3 additions, 1 deletion)
@@ -109,7 +109,9 @@ class DynamicShapesCudaWrapperCudaTests(InductorTestCase):
         test_failures_cuda_wrapper[
             f"{test_name}_dynamic_shapes"
         ] = test_torchinductor.TestFailure(("cuda_wrapper",), is_skip=False)
-    skip_list = []
+    skip_list = [
+        "test_multi_device_cuda",
+    ]
     for test_name in skip_list:
         test_failures_cuda_wrapper[test_name] = test_torchinductor.TestFailure(
             ("cuda_wrapper",), is_skip=True
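
For context, a minimal sketch of the skip-list mechanism this hunk restores, using a simplified stand-in for test_torchinductor.TestFailure (the real class lives in the inductor test suite): names registered with is_skip=True are skipped outright, while is_skip=False marks an expected failure.

# Simplified stand-in for test_torchinductor.TestFailure; the real
# class has more fields, but the skip-list mechanism works the same way.
from typing import NamedTuple, Tuple

class TestFailure(NamedTuple):
    suffixes: Tuple[str, ...]
    is_skip: bool

test_failures_cuda_wrapper = {}
skip_list = ["test_multi_device_cuda"]
for test_name in skip_list:
    # is_skip=True tells the harness to skip the test entirely rather
    # than run it and expect a failure.
    test_failures_cuda_wrapper[test_name] = TestFailure(("cuda_wrapper",), is_skip=True)

assert test_failures_cuda_wrapper["test_multi_device_cuda"].is_skip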
torch/_inductor/codegen/cpp_wrapper_cpu.py (6 additions, 11 deletions)
@@ -895,11 +895,9 @@ def codegen_scalar_to_tensor(self, output: str):
     @cache_on_self
     def get_output_refs(self):
         return [
-            (
-                f"torch::tensor({x.codegen_reference(self.wrapper_call)})"
-                if isinstance(x, ir.ShapeAsConstantBuffer) and not config.abi_compatible
-                else x.codegen_reference(self.wrapper_call)
-            )
+            f"torch::tensor({x.codegen_reference(self.wrapper_call)})"
+            if isinstance(x, ir.ShapeAsConstantBuffer) and not config.abi_compatible
+            else x.codegen_reference(self.wrapper_call)
             for x in V.graph.graph_outputs
         ]
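
This hunk only reverts formatting: the parentheses placed around the multi-line conditional expression inside the comprehension are dropped again. A minimal sketch with hypothetical values showing the two forms are equivalent:

# The parenthesized and unparenthesized conditional-expression forms
# behave identically inside a list comprehension; only layout differs.
xs = [1, -2, 3]
with_parens = [(f"pos:{x}" if x > 0 else f"neg:{x}") for x in xs]
without_parens = [f"pos:{x}" if x > 0 else f"neg:{x}" for x in xs]
assert with_parens == without_parens  # both: ['pos:1', 'neg:-2', 'pos:3']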

@@ -1099,11 +1097,9 @@ def generate_end(self, result):
             outputs_str = "output_tensors"
         else:
             outputs = [
-                (
-                    f"output_tensors[{i}]"
-                    if self.output_is_tensor[i]
-                    else f"output_tensors[{i}].item()"
-                )
+                f"output_tensors[{i}]"
+                if self.output_is_tensor[i]
+                else f"output_tensors[{i}].item()"
                 for i in range(len(V.graph.graph_outputs))
             ]
             outputs_str = f"[{', '.join(outputs)}]"
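
As in the previous hunk, only the parenthesization is reverted here. For background, the generated expression relies on torch.Tensor.item(), which converts a zero-dimensional tensor back into a plain Python scalar; a minimal sketch:

import torch

scalar_as_tensor = torch.tensor(42)   # zero-dimensional tensor
assert scalar_as_tensor.item() == 42  # back to a Python int
assert isinstance(scalar_as_tensor.item(), int)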
@@ -1398,7 +1394,6 @@ def can_stack_allocate_buffer(self, buffer):
             and ir.is_contiguous_strides_for_shape(
                 buffer.get_stride(), buffer.get_size()
             )
-            and not buffer.is_extern()
         )
 
     def make_buffer_free(self, buffer):
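
This is the substantive part of the revert: the not buffer.is_extern() clause that #124037 had added to the stack-allocation guard is removed again. A minimal sketch, with a hypothetical Buffer class standing in for the real inductor IR buffer, of the guard pattern as it looked with the now-reverted clause:

class Buffer:
    """Hypothetical stand-in; the real buffer type comes from torch._inductor.ir."""

    def __init__(self, contiguous: bool, extern: bool):
        self.contiguous = contiguous
        self.extern = extern

    def is_extern(self) -> bool:
        return self.extern

def can_stack_allocate_buffer(buffer: Buffer) -> bool:
    # Pre-revert (#124037) behavior, shown for comparison: extern buffers
    # were excluded from stack allocation. The revert drops this clause.
    return buffer.contiguous and not buffer.is_extern()

assert can_stack_allocate_buffer(Buffer(contiguous=True, extern=False))
assert not can_stack_allocate_buffer(Buffer(contiguous=True, extern=True))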
