Description
import torch
import torch_tensorrt
from diffusers import Flux2Pipeline

pipe = Flux2Pipeline.from_pretrained("black-forest-labs/FLUX.2-dev", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Compile the DiT transformer with the TensorRT backend.
dit = pipe.transformer
compiled_dit = torch.compile(
    dit,
    backend="tensorrt",
    options={
        "min_block_size": 1,
        "truncate_long_and_double": True,
    },
)
pipe.transformer = compiled_dit

# Depending on the variant being used, the pipeline call will vary slightly.
# Refer to the pipeline documentation for more details.
with torch_tensorrt.dynamo.Debugger(logging_dir="logs"):
    prompt = "A cat holding a sign that says hello world"
    image = pipe(prompt=prompt, num_inference_steps=50, guidance_scale=2.5).images[0]
    image.save("flux.png")
21:49:37 - WARNING - TRT conversion failed on the subgraph. See trace above. Returning GraphModule forward instead.
Traceback (most recent call last):
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/backend/backends.py", line 164, in _pretraced_backend
trt_compiled = compile_module(
^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/_compiler.py", line 940, in compile_module
trt_module = convert_module(
^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/_conversion.py", line 88, in convert_module
interpreter_result = interpret_module_to_result(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/_conversion.py", line 67, in interpret_module_to_result
interpreter_result = interpreter.run()
^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/_TRTInterpreter.py", line 735, in run
self._construct_trt_network_def()
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/_TRTInterpreter.py", line 413, in _construct_trt_network_def
super().run()
File "/usr/local/lib/python3.12/dist-packages/torch/fx/interpreter.py", line 174, in run
self.env[node] = self.run_node(node)
^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/_TRTInterpreter.py", line 798, in run_node
trt_node: torch.fx.Node = super().run_node(n)
^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch/fx/interpreter.py", line 256, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/_TRTInterpreter.py", line 905, in call_function
return converter(self.ctx, target, args, kwargs, self._cur_node_name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/aten_ops_converters.py", line 1946, in aten_ops_mul
return impl.elementwise.mul(
^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/impl/elementwise/ops.py", line 477, in mul
return convert_binary_elementwise(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/impl/elementwise/base.py", line 134, in convert_binary_elementwise
rhs_val = get_trt_tensor(ctx, rhs_val, f"{name}_rhs", rhs_dtype)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/converter_utils.py", line 533, in get_trt_tensor
return create_constant(
^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/converter_utils.py", line 486, in create_constant
trt_weights = to_trt_weights(ctx, torch_value, name, "CONSTANT", "CONSTANT")
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/torch_tensorrt/dynamo/conversion/converter_utils.py", line 406, in to_trt_weights
return trt.Weights(dtype, value.data_ptr(), count)
^^^^^^^^^^^^^^^^
RuntimeError: Cannot access data pointer of Tensor (e.g. FakeTensor, FunctionalTensor). If you're using torch.compile/export/fx, it is likely that we are erroneously tracing into a custom kernel. To fix this, please wrap the custom kernel into an opaque custom op. Please see the following for details: https://pytorch.org/tutorials/advanced/custom_ops_landing_page.html
While executing %mul_14 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_reshape_copy_5, %_frozen_param2), kwargs = {})
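For reference, the error message suggests wrapping the offending kernel in an opaque custom op so that torch.compile/export stops tracing into it and only sees a fake (meta) implementation. A minimal sketch of that pattern with torch.library.custom_op is below; the op name mylib::scale_rows and its body are placeholders for illustration, not part of FLUX.2 or torch_tensorrt, and this does not by itself address the _frozen_param2 constant shown above.

import torch

# Sketch of the "opaque custom op" pattern the error message points to.
# The op name and kernel body are hypothetical placeholders.
@torch.library.custom_op("mylib::scale_rows", mutates_args=())
def scale_rows(x: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    # The real custom kernel would be invoked here; compile/export treats
    # this body as opaque and does not trace into it.
    return x * scale

@scale_rows.register_fake
def _(x: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    # Fake (meta) implementation: shape/dtype propagation only,
    # so no real data pointer is touched during tracing.
    return torch.empty_like(x)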