diff --git a/tests/pytorch/distributed/test_cast_master_weights_to_fp8.py b/tests/pytorch/distributed/test_cast_master_weights_to_fp8.py index f60986a92..a8fca0577 100644 --- a/tests/pytorch/distributed/test_cast_master_weights_to_fp8.py +++ b/tests/pytorch/distributed/test_cast_master_weights_to_fp8.py @@ -1,3 +1,5 @@ +# This file was modified for portability to AMDGPU. +# Copyright (c) 2026, Advanced Micro Devices, Inc. All rights reserved. # Copyright (c) 2022-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # See LICENSE for license information. @@ -717,7 +719,9 @@ def run_parallel_tests() -> None: @pytest.mark.parametrize("world_size", [2]) def test_cast_master_weights_to_fp8(world_size: int) -> None: """Launch parallel job that runs parallel tests""" - python_exe = pathlib.Path(sys.executable).resolve() + # ROCm: Use the executable as-is; do not resolve(), since resolving a venv symlink + # may point to the system Python, which does not have torch/site-packages. + python_exe = pathlib.Path(sys.executable) current_file = pathlib.Path(__file__).resolve() command = [ python_exe,