Calling `F.sqrt` on a `float64` tensor placed on a GPU (`Accelerator(0)`) fails during graph compilation with a missing `llvm.nvvm.sqrt.approx.d` intrinsic. The equivalent code works correctly with PyTorch.
# Minimal reproduction: elementwise sqrt of a float64 tensor on a GPU
# (Accelerator) fails to compile — the traceback below reports a missing
# "llvm.nvvm.sqrt.approx.d" NVVM intrinsic (".d" presumably = double,
# matching the float64 dtype used here).
import numpy as np
from max.driver import Accelerator
from max.experimental import functional as F
from max.experimental.tensor import Tensor
# NOTE(review): dtype=np.float64 appears to be what triggers the failure —
# the intrinsic that cannot be found is the double-precision sqrt variant.
x = Tensor(
np.array([0.0, 1.0, 4.0, 9.0, 16.0], dtype=np.float64),
device=Accelerator(0),
)
y = F.sqrt(x)  # raises RuntimeError during eager graph compilation (see traceback)
print(y)
Traceback (most recent call last):
File "/projects/open_source/torch-max-backend/.venv/lib/python3.12/site-packages/max/engine/api.py", line 565, in load_all
_model = self._impl.compile_from_object(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ValueError: Graph compilation failed:
-:1:1: error: failed to lower module to LLVM IR for archive compilation, translate module to LLVMIR failed
error: could not find LLVM intrinsic: "llvm.nvvm.sqrt.approx.d" : !kgen.string
error: LLVM Translation failed for operation: llvm.call_intrinsic
-:1:1: error: The graph compiler could not elaborate the generated KGEN
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/projects/open_source/torch-max-backend/trying_stuff.py", line 13, in <module>
y = F.sqrt(x)
^^^^^^^^^
File "/projects/open_source/torch-max-backend/.venv/lib/python3.12/site-packages/max/experimental/functional.py", line 205, in wrapped
with contextlib.ExitStack() as stack:
^^^^^^^^^^^^^^^^^^^^^^
File "/root/.local/share/uv/python/cpython-3.12.11-linux-x86_64-gnu/lib/python3.12/contextlib.py", line 610, in __exit__
raise exc_details[1]
File "/root/.local/share/uv/python/cpython-3.12.11-linux-x86_64-gnu/lib/python3.12/contextlib.py", line 595, in __exit__
if cb(*exc_details):
^^^^^^^^^^^^^^^^
File "/projects/open_source/torch-max-backend/.venv/lib/python3.12/site-packages/max/experimental/realization_context.py", line 636, in __exit__
F._run(self.realize_all())
File "/projects/open_source/torch-max-backend/.venv/lib/python3.12/site-packages/max/experimental/functional.py", line 98, in _run
return asyncio.run(coro)
^^^^^^^^^^^^^^^^^
File "/root/.local/share/uv/python/cpython-3.12.11-linux-x86_64-gnu/lib/python3.12/asyncio/runners.py", line 195, in run
return runner.run(main)
^^^^^^^^^^^^^^^^
File "/root/.local/share/uv/python/cpython-3.12.11-linux-x86_64-gnu/lib/python3.12/asyncio/runners.py", line 118, in run
return self._loop.run_until_complete(task)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/.local/share/uv/python/cpython-3.12.11-linux-x86_64-gnu/lib/python3.12/asyncio/base_events.py", line 691, in run_until_complete
return future.result()
^^^^^^^^^^^^^^^
File "/projects/open_source/torch-max-backend/.venv/lib/python3.12/site-packages/max/experimental/realization_context.py", line 479, in realize_all
model = _load_eager_model(graph)
^^^^^^^^^^^^^^^^^^^^^^^^
File "/projects/open_source/torch-max-backend/.venv/lib/python3.12/site-packages/max/experimental/realization_context.py", line 316, in _load_eager_model
return session.load(graph)
^^^^^^^^^^^^^^^^^^^
File "/projects/open_source/torch-max-backend/.venv/lib/python3.12/site-packages/max/engine/api.py", line 474, in load
models = self.load_all(
^^^^^^^^^^^^^^
File "/projects/open_source/torch-max-backend/.venv/lib/python3.12/site-packages/max/engine/api.py", line 571, in load_all
raise RuntimeError(
RuntimeError: Failed to compile the model. Please file an issue, all models should be correct by construction and this error should have been caught during construction.
For more detailed failure information run with the environment variable `MODULAR_MAX_DEBUG=True`.
Bug description
The equivalent code works correctly with pytorch.
Steps to reproduce: run the script above on a machine with an NVIDIA GPU (the failing intrinsic is an `nvvm` one, so an NVIDIA accelerator is presumably required).
System information